+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i1> %2,
- i32 %3)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i1> %2,
- i32 %3)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i1> %2,
- i32 %3)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i1> %2,
- i32 %3)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i1> %2,
- i32 %3)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i1> %2,
- i32 %3)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i1> %2,
- i32 %3)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i1> %2,
- i32 %3)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i1> %2,
- i32 %3)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i1> %2,
- i32 %3)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i1> %2,
- i32 %3)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i1> %2,
- i32 %3)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i1> %2,
- i32 %3)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i1> %2,
- i32 %3)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i1> %2,
- i32 %3)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i1> %2,
- i32 %3)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i1> %2,
- i32 %3)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i1> %2,
- i32 %3)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i1> %2,
- i32 %3)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i1> %2,
- i32 %3)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i1> %2,
- i32 %3)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- <vscale x 8 x i1> %2,
- i32 %3)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 %1,
- <vscale x 1 x i1> %2,
- i32 %3)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 %1,
- <vscale x 2 x i1> %2,
- i32 %3)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 %1,
- <vscale x 4 x i1> %2,
- i32 %3)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 %1,
- <vscale x 8 x i1> %2,
- i32 %3)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 %1,
- <vscale x 16 x i1> %2,
- i32 %3)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 %1,
- <vscale x 32 x i1> %2,
- i32 %3)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- <vscale x 64 x i1>,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 %1,
- <vscale x 64 x i1> %2,
- i32 %3)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 %1,
- <vscale x 1 x i1> %2,
- i32 %3)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 %1,
- <vscale x 2 x i1> %2,
- i32 %3)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 %1,
- <vscale x 4 x i1> %2,
- i32 %3)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 %1,
- <vscale x 8 x i1> %2,
- i32 %3)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 %1,
- <vscale x 16 x i1> %2,
- i32 %3)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- <vscale x 32 x i1>,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 %1,
- <vscale x 32 x i1> %2,
- i32 %3)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 %1,
- <vscale x 1 x i1> %2,
- i32 %3)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 %1,
- <vscale x 2 x i1> %2,
- i32 %3)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 %1,
- <vscale x 4 x i1> %2,
- i32 %3)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 %1,
- <vscale x 8 x i1> %2,
- i32 %3)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 %1,
- <vscale x 16 x i1> %2,
- i32 %3)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 %1,
- <vscale x 1 x i1> %2,
- i32 %3)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 %1,
- <vscale x 2 x i1> %2,
- i32 %3)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 %1,
- <vscale x 4 x i1> %2,
- i32 %3)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 %1,
- <vscale x 8 x i1> %2,
- i32 %3)
-
- ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vadc_vim_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vadc.vim v8, v8, -9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 -9,
- <vscale x 1 x i1> %1,
- i32 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vadc_vim_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vadc.vim v8, v8, 9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 9,
- <vscale x 2 x i1> %1,
- i32 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vadc_vim_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vadc.vim v8, v8, -9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 -9,
- <vscale x 4 x i1> %1,
- i32 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vadc_vim_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vadc.vim v8, v8, 9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 9,
- <vscale x 8 x i1> %1,
- i32 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vadc_vim_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vadc.vim v8, v8, -9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 -9,
- <vscale x 16 x i1> %1,
- i32 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vadc_vim_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vadc.vim v8, v8, 9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 9,
- <vscale x 32 x i1> %1,
- i32 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vadc_vim_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vadc.vim v8, v8, -9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 -9,
- <vscale x 64 x i1> %1,
- i32 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vadc_vim_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vadc.vim v8, v8, 9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 9,
- <vscale x 1 x i1> %1,
- i32 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vadc_vim_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vadc.vim v8, v8, -9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 -9,
- <vscale x 2 x i1> %1,
- i32 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vadc_vim_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vadc.vim v8, v8, 9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 9,
- <vscale x 4 x i1> %1,
- i32 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vadc_vim_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vadc.vim v8, v8, -9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 -9,
- <vscale x 8 x i1> %1,
- i32 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vadc_vim_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vadc.vim v8, v8, 9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 9,
- <vscale x 16 x i1> %1,
- i32 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vadc_vim_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vadc.vim v8, v8, -9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 -9,
- <vscale x 32 x i1> %1,
- i32 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vadc_vim_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vadc.vim v8, v8, 9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 9,
- <vscale x 1 x i1> %1,
- i32 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vadc_vim_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vadc.vim v8, v8, -9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 -9,
- <vscale x 2 x i1> %1,
- i32 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vadc_vim_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vadc.vim v8, v8, 9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 9,
- <vscale x 4 x i1> %1,
- i32 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vadc_vim_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vadc.vim v8, v8, -9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 -9,
- <vscale x 8 x i1> %1,
- i32 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vadc_vim_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vadc.vim v8, v8, 9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 9,
- <vscale x 16 x i1> %1,
- i32 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vadc_vim_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vadc.vim v8, v8, 9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 9,
- <vscale x 1 x i1> %1,
- i32 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vadc_vim_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vadc.vim v8, v8, -9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 -9,
- <vscale x 2 x i1> %1,
- i32 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vadc_vim_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vadc.vim v8, v8, 9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 9,
- <vscale x 4 x i1> %1,
- i32 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vadc_vim_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vadc.vim v8, v8, -9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 -9,
- <vscale x 8 x i1> %1,
- i32 %2)
-
- ret <vscale x 8 x i64> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
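+; The test body below is written with the placeholder type iXLen; each RUN
+; line above rewrites it with sed to the target's XLEN integer type (i32 for
+; riscv32, i64 for riscv64) before invoking llc, so one file covers both
+; targets. Output common to both targets matches the shared CHECK prefix,
+; while any per-target differences can use the RV32/RV64 prefixes.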
declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i64);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i64);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i64);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i64);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i64);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i64);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i1>,
- i64);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
<vscale x 64 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i64);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i64);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i64);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i64);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i64);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i1>,
- i64);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
<vscale x 32 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i64);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i64);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i64);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i64);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
- i64);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
<vscale x 16 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i64);
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i64);
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i64);
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
- i64);
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 8 x i64> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i64);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
<vscale x 1 x i8> %0,
i8 %1,
<vscale x 1 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i64);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
<vscale x 2 x i8> %0,
i8 %1,
<vscale x 2 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i64);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
<vscale x 4 x i8> %0,
i8 %1,
<vscale x 4 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i64);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
<vscale x 8 x i8> %0,
i8 %1,
<vscale x 8 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i64);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
<vscale x 16 x i8> %0,
i8 %1,
<vscale x 16 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i64);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
<vscale x 32 x i8> %0,
i8 %1,
<vscale x 32 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
i8,
<vscale x 64 x i1>,
- i64);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
<vscale x 64 x i8> %0,
i8 %1,
<vscale x 64 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i64);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
<vscale x 1 x i16> %0,
i16 %1,
<vscale x 1 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i64);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
<vscale x 2 x i16> %0,
i16 %1,
<vscale x 2 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i64);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
<vscale x 4 x i16> %0,
i16 %1,
<vscale x 4 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i64);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
<vscale x 8 x i16> %0,
i16 %1,
<vscale x 8 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i64);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
<vscale x 16 x i16> %0,
i16 %1,
<vscale x 16 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
i16,
<vscale x 32 x i1>,
- i64);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
<vscale x 32 x i16> %0,
i16 %1,
<vscale x 32 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i64);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
<vscale x 1 x i32> %0,
i32 %1,
<vscale x 1 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i64);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
<vscale x 2 x i32> %0,
i32 %1,
<vscale x 2 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i64);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
<vscale x 4 x i32> %0,
i32 %1,
<vscale x 4 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i64);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
<vscale x 8 x i32> %0,
i32 %1,
<vscale x 8 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
i32,
<vscale x 16 x i1>,
- i64);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
<vscale x 16 x i32> %0,
i32 %1,
<vscale x 16 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
+ iXLen);
+
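+; For i64 scalar operands the RV32 and RV64 outputs diverge: RV32 has no
+; 64-bit GPRs, so the i64 arrives in the a0/a1 register pair, is spilled to
+; the stack, and is splatted into a vector with a zero-stride vlse64.v so the
+; vector-vector form vadc.vvm can be used; RV64 uses vadc.vxm directly.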
+define <vscale x 1 x i64> @intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vadc.vvm v8, v8, v9, v0
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vadc.vxm v8, v8, a0, v0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vadc.vvm v8, v8, v10, v0
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vadc.vxm v8, v8, a0, v0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vadc.vvm v8, v8, v12, v0
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vadc.vxm v8, v8, a0, v0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vadc.vvm v8, v8, v16, v0
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vadc.vxm v8, v8, a0, v0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
<vscale x 8 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 8 x i64> %a
}
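+; The vim tests only use the immediates 9 and -9, both of which fit the
+; 5-bit simm5 field of vadc.vim, so RV32 and RV64 emit identical code and
+; even the i64 element types need no separate RV32/RV64 check lines.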
-define <vscale x 1 x i8> @intrinsic_vadc_vim_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vadc_vim_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vadc.vim v8, v8, 9, v0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 9,
<vscale x 1 x i1> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
-define <vscale x 2 x i8> @intrinsic_vadc_vim_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vadc_vim_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vadc.vim v8, v8, -9, v0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 -9,
<vscale x 2 x i1> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
-define <vscale x 4 x i8> @intrinsic_vadc_vim_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vadc_vim_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vadc.vim v8, v8, 9, v0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 9,
<vscale x 4 x i1> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
-define <vscale x 8 x i8> @intrinsic_vadc_vim_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vadc_vim_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vadc.vim v8, v8, -9, v0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 -9,
<vscale x 8 x i1> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
-define <vscale x 16 x i8> @intrinsic_vadc_vim_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vadc_vim_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vadc.vim v8, v8, 9, v0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 9,
<vscale x 16 x i1> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
-define <vscale x 32 x i8> @intrinsic_vadc_vim_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vadc_vim_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vadc.vim v8, v8, -9, v0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 -9,
<vscale x 32 x i1> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
-define <vscale x 64 x i8> @intrinsic_vadc_vim_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vadc_vim_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vadc.vim v8, v8, 9, v0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 9,
<vscale x 64 x i1> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
-define <vscale x 1 x i16> @intrinsic_vadc_vim_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vadc_vim_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vadc.vim v8, v8, -9, v0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 -9,
<vscale x 1 x i1> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
-define <vscale x 2 x i16> @intrinsic_vadc_vim_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vadc_vim_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vadc.vim v8, v8, 9, v0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 9,
<vscale x 2 x i1> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
-define <vscale x 4 x i16> @intrinsic_vadc_vim_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vadc_vim_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vadc.vim v8, v8, -9, v0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 -9,
<vscale x 4 x i1> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
-define <vscale x 8 x i16> @intrinsic_vadc_vim_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vadc_vim_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vadc.vim v8, v8, 9, v0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 9,
<vscale x 8 x i1> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
-define <vscale x 16 x i16> @intrinsic_vadc_vim_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vadc_vim_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vadc.vim v8, v8, -9, v0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 -9,
<vscale x 16 x i1> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
-define <vscale x 32 x i16> @intrinsic_vadc_vim_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vadc_vim_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vadc.vim v8, v8, 9, v0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 9,
<vscale x 32 x i1> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
-define <vscale x 1 x i32> @intrinsic_vadc_vim_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vadc_vim_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
<vscale x 1 x i32> %0,
i32 -9,
<vscale x 1 x i1> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
-define <vscale x 2 x i32> @intrinsic_vadc_vim_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vadc_vim_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
<vscale x 2 x i32> %0,
i32 9,
<vscale x 2 x i1> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
-define <vscale x 4 x i32> @intrinsic_vadc_vim_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vadc_vim_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
<vscale x 4 x i32> %0,
i32 -9,
<vscale x 4 x i1> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
-define <vscale x 8 x i32> @intrinsic_vadc_vim_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vadc_vim_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
<vscale x 8 x i32> %0,
i32 9,
<vscale x 8 x i1> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
-define <vscale x 16 x i32> @intrinsic_vadc_vim_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vadc_vim_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
<vscale x 16 x i32> %0,
i32 -9,
<vscale x 16 x i1> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
-define <vscale x 1 x i64> @intrinsic_vadc_vim_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vadc_vim_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
<vscale x 1 x i64> %0,
i64 9,
<vscale x 1 x i1> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
-define <vscale x 2 x i64> @intrinsic_vadc_vim_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vadc_vim_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
<vscale x 2 x i64> %0,
i64 -9,
<vscale x 2 x i1> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
-define <vscale x 4 x i64> @intrinsic_vadc_vim_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vadc_vim_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
<vscale x 4 x i64> %0,
i64 9,
<vscale x 4 x i1> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
-define <vscale x 8 x i64> @intrinsic_vadc_vim_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vadc_vim_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
<vscale x 8 x i64> %0,
i64 -9,
<vscale x 8 x i1> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vand.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vand.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vand.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vand.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vand.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vand.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vand.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i64 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i64,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8r.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vand.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vand.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vand.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vand.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vand.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vand.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vand.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vand.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vand.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vand.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vand.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vand.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vand.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vand.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vand.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vand_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vand.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vand_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vand.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vand_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vand.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vand_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vand.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vand_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vand.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vand.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vand_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vand.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vand.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vand_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vand.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- <vscale x 64 x i1>,
- i64,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vand.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i8 %2,
- <vscale x 64 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vand_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vand.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vand_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vand.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vand_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vand.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vand_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vand.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vand.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vand_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vand.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vand.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vand_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vand.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vand.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i16 %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vand_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vand.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vand_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vand.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vand_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vand.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vand.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vand_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vand.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vand.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vand_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vand.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vand.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vand_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vand.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vand_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vand.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vand.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vand_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vand.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vand.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vand_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vand.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vand.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vand_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vand.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i8 9,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vand_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vand.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i8 9,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vand_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vand.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i8 9,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vand_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vand.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i8 9,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vand_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vand.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i8 9,
- <vscale x 16 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vand_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vand.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i8 9,
- <vscale x 32 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vand_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vand.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT: vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i8 9,
- <vscale x 64 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vand_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vand.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i16 9,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vand_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vand.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i16 9,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vand_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vand.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i16 9,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vand_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vand.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i16 9,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vand_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vand.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i16 9,
- <vscale x 16 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vand_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vand.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT: vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i16 9,
- <vscale x 32 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vand_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vand.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 9,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vand_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vand.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 9,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vand_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vand.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 9,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vand_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vand.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 9,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vand_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vand.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT: vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 9,
- <vscale x 16 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vand_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vand.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 9,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vand_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vand.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 9,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vand_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vand.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 9,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vand_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vand.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 9,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
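+; The iXLen placeholder used throughout this file stands in for the target's
+; XLEN type: the sed commands above instantiate it as i32 for riscv32 and as
+; i64 for riscv64, so a single source now exercises both targets. Output that
+; is identical on both targets is matched under the common CHECK prefix;
+; codegen that diverges is matched under RV32 or RV64.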
declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i32);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i32);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i32);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i32);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i32);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i32);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
- i32);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
<vscale x 64 x i8> %1,
<vscale x 64 x i8> %2,
<vscale x 64 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i32);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i32);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i32);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i32);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i32);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
- i32);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re16.v v24, (a0)
<vscale x 32 x i16> %1,
<vscale x 32 x i16> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i32);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i32);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i32);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i32);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
- i32);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re32.v v24, (a0)
<vscale x 16 x i32> %1,
<vscale x 16 x i32> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i32);
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i32);
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i32);
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
- i32);
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vand_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vand_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vand_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vand_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vand_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vand_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vand_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vand_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vand_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vand_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vand_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vand_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vand_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vand_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
i8,
<vscale x 64 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
<vscale x 64 x i8> %1,
i8 %2,
<vscale x 64 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vand_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vand_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vand_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vand_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vand_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vand_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vand_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vand_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vand_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vand_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vand_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vand_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
i16,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
<vscale x 32 x i16> %1,
i16 %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vand_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vand_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vand_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vand_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vand_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vand_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vand_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vand_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vand_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vand_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
i32,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
<vscale x 16 x i32> %1,
i32 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vand_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vand.vv v8, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vand_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vand_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vand.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vand_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
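+; Note: for e64 elements on riscv32 the i64 scalar operand arrives in the
+; a0/a1 GPR pair, so it is spilled to the stack and splatted into a vector via
+; a zero-stride vlse64.v feeding vand.vv; riscv64 holds the full scalar in a
+; single GPR and selects vand.vx directly, which is why these tests carry
+; separate RV32 and RV64 check lines.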
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vand.vv v8, v9, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vand.vx v8, v9, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vand_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vand.vv v8, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vand_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vand_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vand.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vand_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vand.vv v8, v10, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vand.vx v8, v10, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vand_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vand.vv v8, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vand_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vand_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vand.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vand_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vand.vv v8, v12, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vand.vx v8, v12, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vand_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vand.vv v8, v8, v16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vand_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vand_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vand_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vlse64.v v24, (a0), zero
+; RV32-NEXT: vand.vv v8, v16, v24, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vand.vx v8, v16, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
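+
+; The vector-immediate (vi) tests below use the constant 9, which fits in
+; the 5-bit signed immediate field of vand.vi, so riscv32 and riscv64
+; generate identical code here and the assertions can keep the common
+; CHECK prefix instead of splitting into RV32/RV64.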
-define <vscale x 1 x i8> @intrinsic_vand_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vand_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i8> %a
}
-define <vscale x 1 x i8> @intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
i8 9,
<vscale x 1 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 1 x i8> %a
}
-define <vscale x 2 x i8> @intrinsic_vand_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vand_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i8> %a
}
-define <vscale x 2 x i8> @intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
i8 9,
<vscale x 2 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 2 x i8> %a
}
-define <vscale x 4 x i8> @intrinsic_vand_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_vand_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i8> %a
}
-define <vscale x 4 x i8> @intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
i8 9,
<vscale x 4 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 4 x i8> %a
}
-define <vscale x 8 x i8> @intrinsic_vand_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_vand_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i8> %a
}
-define <vscale x 8 x i8> @intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
<vscale x 8 x i8> %1,
i8 9,
<vscale x 8 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 8 x i8> %a
}
-define <vscale x 16 x i8> @intrinsic_vand_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_vand_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i8> %a
}
-define <vscale x 16 x i8> @intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
<vscale x 16 x i8> %1,
i8 9,
<vscale x 16 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 16 x i8> %a
}
-define <vscale x 32 x i8> @intrinsic_vand_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_vand_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 32 x i8> %a
}
-define <vscale x 32 x i8> @intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
<vscale x 32 x i8> %1,
i8 9,
<vscale x 32 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 32 x i8> %a
}
-define <vscale x 64 x i8> @intrinsic_vand_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
+define <vscale x 64 x i8> @intrinsic_vand_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 64 x i8> %a
}
-define <vscale x 64 x i8> @intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
<vscale x 64 x i8> %1,
i8 9,
<vscale x 64 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 64 x i8> %a
}
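+
+; The same vi pattern repeats for e16, e32, and e64 elements at every LMUL;
+; only the element width in vsetvli changes. Even the i64 immediate tests
+; need no RV32/RV64 split, since the immediate never passes through a
+; scalar register.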
-define <vscale x 1 x i16> @intrinsic_vand_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vand_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i16> %a
}
-define <vscale x 1 x i16> @intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
i16 9,
<vscale x 1 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 1 x i16> %a
}
-define <vscale x 2 x i16> @intrinsic_vand_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vand_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i16> %a
}
-define <vscale x 2 x i16> @intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
i16 9,
<vscale x 2 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 2 x i16> %a
}
-define <vscale x 4 x i16> @intrinsic_vand_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vand_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i16> %a
}
-define <vscale x 4 x i16> @intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
<vscale x 4 x i16> %1,
i16 9,
<vscale x 4 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 4 x i16> %a
}
-define <vscale x 8 x i16> @intrinsic_vand_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vand_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i16> %a
}
-define <vscale x 8 x i16> @intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
<vscale x 8 x i16> %1,
i16 9,
<vscale x 8 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 8 x i16> %a
}
-define <vscale x 16 x i16> @intrinsic_vand_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vand_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i16> %a
}
-define <vscale x 16 x i16> @intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
<vscale x 16 x i16> %1,
i16 9,
<vscale x 16 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 16 x i16> %a
}
-define <vscale x 32 x i16> @intrinsic_vand_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
+define <vscale x 32 x i16> @intrinsic_vand_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 32 x i16> %a
}
-define <vscale x 32 x i16> @intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
<vscale x 32 x i16> %1,
i16 9,
<vscale x 32 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 32 x i16> %a
}
-define <vscale x 1 x i32> @intrinsic_vand_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vand_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i32> %a
}
-define <vscale x 1 x i32> @intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
i32 9,
<vscale x 1 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 1 x i32> %a
}
-define <vscale x 2 x i32> @intrinsic_vand_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vand_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i32> %a
}
-define <vscale x 2 x i32> @intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 2 x i32> %a
}
-define <vscale x 4 x i32> @intrinsic_vand_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vand_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i32> %a
}
-define <vscale x 4 x i32> @intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
<vscale x 4 x i32> %1,
i32 9,
<vscale x 4 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 4 x i32> %a
}
-define <vscale x 8 x i32> @intrinsic_vand_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vand_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i32> %a
}
-define <vscale x 8 x i32> @intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
<vscale x 8 x i32> %1,
i32 9,
<vscale x 8 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 8 x i32> %a
}
-define <vscale x 16 x i32> @intrinsic_vand_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vand_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i32> %a
}
-define <vscale x 16 x i32> @intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
<vscale x 16 x i32> %1,
i32 9,
<vscale x 16 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 16 x i32> %a
}
-define <vscale x 1 x i64> @intrinsic_vand_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vand_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i64> %a
}
-define <vscale x 1 x i64> @intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 1 x i64> %a
}
-define <vscale x 2 x i64> @intrinsic_vand_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vand_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i64> %a
}
-define <vscale x 2 x i64> @intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 2 x i64> %a
}
-define <vscale x 4 x i64> @intrinsic_vand_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vand_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i64> %a
}
-define <vscale x 4 x i64> @intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 4 x i64> %a
}
-define <vscale x 8 x i64> @intrinsic_vand_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vand_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i64> %a
}
-define <vscale x 8 x i64> @intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
<vscale x 8 x i64> %1,
i64 9,
<vscale x 8 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 8 x i64> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vdiv.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vdiv.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vdiv.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vdiv.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vdiv.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vdiv.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vdiv.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i64 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i64,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8r.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vdiv.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vdiv.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vdiv.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vdiv.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vdiv.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vdiv.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vdiv.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vdiv.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vdiv.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vdiv.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vdiv.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vdiv.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vdiv.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vdiv.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vdiv.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vdiv.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vdiv.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vdiv.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vdiv.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vdiv.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vdiv.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vdiv.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vdiv.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vdiv.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- <vscale x 64 x i1>,
- i64,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vdiv.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i8 %2,
- <vscale x 64 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vdiv.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vdiv.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vdiv.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vdiv.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vdiv.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vdiv.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vdiv.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vdiv.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vdiv.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i16 %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vdiv.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vdiv.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vdiv.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vdiv.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vdiv.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vdiv.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vdiv.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vdiv.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vdiv.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vdiv.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vdiv.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vdiv.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vdiv.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vdiv.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vdiv.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
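+; Note: the sed pipelines let this single source cover both targets. Each
+; RUN line rewrites every iXLen token to the target's native XLEN type
+; (i32 for riscv32, i64 for riscv64) before llc consumes the IR, so
+; assertions shared by both targets use the CHECK prefix, while any
+; XLEN-specific output can be pinned with the RV32 or RV64 prefixes.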
declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i32);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
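+; The trailing "iXLen 1" on the masked calls is the policy immediate: a
+; value of 1 requests a tail-agnostic, mask-undisturbed operation, which is
+; why the masked checks expect "ta, mu" in the vsetvli while the unmasked
+; calls (undef passthru) lower with "ta, ma".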
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i32);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i32);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i32);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i32);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i32);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
- i32);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
<vscale x 64 x i8> %1,
<vscale x 64 x i8> %2,
<vscale x 64 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i32);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i32);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i32);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i32);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i32);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
- i32);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re16.v v24, (a0)
<vscale x 32 x i16> %1,
<vscale x 32 x i16> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i32);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i32);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i32);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i32);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
- i32);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re32.v v24, (a0)
<vscale x 16 x i32> %1,
<vscale x 16 x i32> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i32);
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i32);
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i32);
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
- i32);
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
i8,
<vscale x 64 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
<vscale x 64 x i8> %1,
i8 %2,
<vscale x 64 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
i16,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
<vscale x 32 x i16> %1,
i16 %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
i32,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
<vscale x 16 x i32> %1,
i32 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vdiv.vv v8, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vdiv.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vdiv.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64(
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vdiv.vv v8, v9, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vdiv.vx v8, v9, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vdiv.vv v8, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vdiv.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vdiv.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64(
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vdiv.vv v8, v10, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vdiv.vx v8, v10, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vdiv.vv v8, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vdiv.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vdiv.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64(
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vdiv.vv v8, v12, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vdiv.vx v8, v12, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vdiv.vv v8, v8, v16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vdiv.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vdiv.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64(
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vlse64.v v24, (a0), zero
+; RV32-NEXT: vdiv.vv v8, v16, v24, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vdiv.vx v8, v16, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vdivu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vdivu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vdivu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vdivu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vdivu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vdivu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vdivu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i64 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i64,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8r.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vdivu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vdivu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vdivu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vdivu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vdivu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vdivu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vdivu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vdivu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vdivu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vdivu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vdivu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vdivu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vdivu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vdivu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vdivu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vdivu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vdivu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vdivu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vdivu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vdivu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vdivu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vdivu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vdivu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vdivu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- <vscale x 64 x i1>,
- i64,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vdivu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i8 %2,
- <vscale x 64 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vdivu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vdivu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vdivu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vdivu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vdivu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vdivu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vdivu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vdivu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vdivu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i16 %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vdivu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vdivu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vdivu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vdivu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vdivu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vdivu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vdivu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vdivu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vdivu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vdivu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vdivu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vdivu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vdivu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vdivu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vdivu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
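+; The RUN lines above use sed to rewrite the iXLen placeholder before llc runs,
+; so a single test body is compiled and checked for both riscv32 (iXLen -> i32)
+; and riscv64 (iXLen -> i64); assertions common to both targets use the shared
+; CHECK prefix, with RV32/RV64 available for any target-specific lines.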
declare <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i32);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i32);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i32);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i32);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i32);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i32);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
- i32);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
<vscale x 64 x i8> %1,
<vscale x 64 x i8> %2,
<vscale x 64 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i32);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i32);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i32);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i32);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i32);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
- i32);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re16.v v24, (a0)
<vscale x 32 x i16> %1,
<vscale x 32 x i16> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i32);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i32);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i32);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i32);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
- i32);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re32.v v24, (a0)
<vscale x 16 x i32> %1,
<vscale x 16 x i32> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i32);
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i32);
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i32);
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
- i32);
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
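; Vector-scalar (vx) tests: the divisor is passed as a GPR scalar, so VL
; shifts to a1 and the vsetvli below reads a1 instead of a0.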
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
i8,
<vscale x 64 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
<vscale x 64 x i8> %1,
i8 %2,
<vscale x 64 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
i16,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
<vscale x 32 x i16> %1,
i16 %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
i32,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
<vscale x 16 x i32> %1,
i32 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
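; For SEW=64 the i64 scalar no longer fits in one GPR on RV32, so the checks
; diverge per target: the RV32 lowering stores the two 32-bit halves to the
; stack, splats them with a zero-stride vlse64.v, and falls back to vdivu.vv,
; while RV64 keeps the direct vdivu.vx form.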
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vdivu.vv v8, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vdivu.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vdivu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64(
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vdivu.vv v8, v9, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vdivu.vx v8, v9, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vdivu.vv v8, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vdivu.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vdivu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64(
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vdivu.vv v8, v10, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vdivu.vx v8, v10, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vdivu.vv v8, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vdivu.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vdivu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64(
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vdivu.vv v8, v12, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vdivu.vx v8, v12, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vdivu.vv v8, v8, v16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vdivu.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vdivu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64(
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vlse64.v v24, (a0), zero
+; RV32-NEXT: vdivu.vv v8, v16, v24, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vdivu.vx v8, v16, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+experimental-zvfh,+f,+d -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vleff_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vle64ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 1 x i64>, i32 } %a, 0
- %c = extractvalue { <vscale x 1 x i64>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 1 x i64> %b
-}
-
-declare { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64>* %1,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 1 x i64>, i32 } %a, 0
- %c = extractvalue { <vscale x 1 x i64>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 1 x i64> %b
-}
-
-declare { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vleff_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vle64ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 2 x i64>, i32 } %a, 0
- %c = extractvalue { <vscale x 2 x i64>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 2 x i64> %b
-}
-
-declare { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vleff_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64>* %1,
- <vscale x 2 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 2 x i64>, i32 } %a, 0
- %c = extractvalue { <vscale x 2 x i64>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 2 x i64> %b
-}
-
-declare { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vleff_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vle64ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 4 x i64>, i32 } %a, 0
- %c = extractvalue { <vscale x 4 x i64>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 4 x i64> %b
-}
-
-declare { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vleff_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64>* %1,
- <vscale x 4 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 4 x i64>, i32 } %a, 0
- %c = extractvalue { <vscale x 4 x i64>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 4 x i64> %b
-}
-
-declare { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vleff_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vle64ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 8 x i64>, i32 } %a, 0
- %c = extractvalue { <vscale x 8 x i64>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 8 x i64> %b
-}
-
-declare { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vleff_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64>* %1,
- <vscale x 8 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 8 x i64>, i32 } %a, 0
- %c = extractvalue { <vscale x 8 x i64>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 8 x i64> %b
-}
-
-declare { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.nxv1f64(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- i32);
-
-define <vscale x 1 x double> @intrinsic_vleff_v_nxv1f64_nxv1f64(<vscale x 1 x double>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vle64ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.nxv1f64(
- <vscale x 1 x double> undef,
- <vscale x 1 x double>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 1 x double>, i32 } %a, 0
- %c = extractvalue { <vscale x 1 x double>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 1 x double> %b
-}
-
-declare { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.mask.nxv1f64(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x double> @intrinsic_vleff_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.mask.nxv1f64(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 1 x double>, i32 } %a, 0
- %c = extractvalue { <vscale x 1 x double>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 1 x double> %b
-}
-
-declare { <vscale x 2 x double>, i32 } @llvm.riscv.vleff.nxv2f64(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- i32);
-
-define <vscale x 2 x double> @intrinsic_vleff_v_nxv2f64_nxv2f64(<vscale x 2 x double>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vle64ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x double>, i32 } @llvm.riscv.vleff.nxv2f64(
- <vscale x 2 x double> undef,
- <vscale x 2 x double>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 2 x double>, i32 } %a, 0
- %c = extractvalue { <vscale x 2 x double>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 2 x double> %b
-}
-
-declare { <vscale x 2 x double>, i32 } @llvm.riscv.vleff.mask.nxv2f64(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x double> @intrinsic_vleff_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x double>, i32 } @llvm.riscv.vleff.mask.nxv2f64(
- <vscale x 2 x double> %0,
- <vscale x 2 x double>* %1,
- <vscale x 2 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 2 x double>, i32 } %a, 0
- %c = extractvalue { <vscale x 2 x double>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 2 x double> %b
-}
-
-declare { <vscale x 4 x double>, i32 } @llvm.riscv.vleff.nxv4f64(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- i32);
-
-define <vscale x 4 x double> @intrinsic_vleff_v_nxv4f64_nxv4f64(<vscale x 4 x double>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vle64ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x double>, i32 } @llvm.riscv.vleff.nxv4f64(
- <vscale x 4 x double> undef,
- <vscale x 4 x double>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 4 x double>, i32 } %a, 0
- %c = extractvalue { <vscale x 4 x double>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 4 x double> %b
-}
-
-declare { <vscale x 4 x double>, i32 } @llvm.riscv.vleff.mask.nxv4f64(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x double> @intrinsic_vleff_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x double>, i32 } @llvm.riscv.vleff.mask.nxv4f64(
- <vscale x 4 x double> %0,
- <vscale x 4 x double>* %1,
- <vscale x 4 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 4 x double>, i32 } %a, 0
- %c = extractvalue { <vscale x 4 x double>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 4 x double> %b
-}
-
-declare { <vscale x 8 x double>, i32 } @llvm.riscv.vleff.nxv8f64(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- i32);
-
-define <vscale x 8 x double> @intrinsic_vleff_v_nxv8f64_nxv8f64(<vscale x 8 x double>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vle64ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x double>, i32 } @llvm.riscv.vleff.nxv8f64(
- <vscale x 8 x double> undef,
- <vscale x 8 x double>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 8 x double>, i32 } %a, 0
- %c = extractvalue { <vscale x 8 x double>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 8 x double> %b
-}
-
-declare { <vscale x 8 x double>, i32 } @llvm.riscv.vleff.mask.nxv8f64(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x double> @intrinsic_vleff_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x double>, i32 } @llvm.riscv.vleff.mask.nxv8f64(
- <vscale x 8 x double> %0,
- <vscale x 8 x double>* %1,
- <vscale x 8 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 8 x double>, i32 } %a, 0
- %c = extractvalue { <vscale x 8 x double>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 8 x double> %b
-}
-
-declare { <vscale x 1 x i32>, i32 } @llvm.riscv.vleff.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vleff_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vle32ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x i32>, i32 } @llvm.riscv.vleff.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 1 x i32>, i32 } %a, 0
- %c = extractvalue { <vscale x 1 x i32>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 1 x i32> %b
-}
-
-declare { <vscale x 1 x i32>, i32 } @llvm.riscv.vleff.mask.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vleff_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x i32>, i32 } @llvm.riscv.vleff.mask.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 1 x i32>, i32 } %a, 0
- %c = extractvalue { <vscale x 1 x i32>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 1 x i32> %b
-}
-
-declare { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vleff_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vle32ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 2 x i32>, i32 } %a, 0
- %c = extractvalue { <vscale x 2 x i32>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 2 x i32> %b
-}
-
-declare { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vleff_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 2 x i32>, i32 } %a, 0
- %c = extractvalue { <vscale x 2 x i32>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 2 x i32> %b
-}
-
-declare { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vleff_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vle32ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 4 x i32>, i32 } %a, 0
- %c = extractvalue { <vscale x 4 x i32>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 4 x i32> %b
-}
-
-declare { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vleff_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 4 x i32>, i32 } %a, 0
- %c = extractvalue { <vscale x 4 x i32>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 4 x i32> %b
-}
-
-declare { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vleff_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vle32ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 8 x i32>, i32 } %a, 0
- %c = extractvalue { <vscale x 8 x i32>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 8 x i32> %b
-}
-
-declare { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vleff_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 8 x i32>, i32 } %a, 0
- %c = extractvalue { <vscale x 8 x i32>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 8 x i32> %b
-}
-
-declare { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vleff_v_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vle32ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 16 x i32>, i32 } %a, 0
- %c = extractvalue { <vscale x 16 x i32>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 16 x i32> %b
-}
-
-declare { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vleff_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 16 x i32>, i32 } %a, 0
- %c = extractvalue { <vscale x 16 x i32>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 16 x i32> %b
-}
-
-declare { <vscale x 1 x float>, i32 } @llvm.riscv.vleff.nxv1f32(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- i32);
-
-define <vscale x 1 x float> @intrinsic_vleff_v_nxv1f32_nxv1f32(<vscale x 1 x float>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vle32ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x float>, i32 } @llvm.riscv.vleff.nxv1f32(
- <vscale x 1 x float> undef,
- <vscale x 1 x float>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 1 x float>, i32 } %a, 0
- %c = extractvalue { <vscale x 1 x float>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 1 x float> %b
-}
-
-declare { <vscale x 1 x float>, i32 } @llvm.riscv.vleff.mask.nxv1f32(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x float> @intrinsic_vleff_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x float>, i32 } @llvm.riscv.vleff.mask.nxv1f32(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 1 x float>, i32 } %a, 0
- %c = extractvalue { <vscale x 1 x float>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 1 x float> %b
-}
-
-declare { <vscale x 2 x float>, i32 } @llvm.riscv.vleff.nxv2f32(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- i32);
-
-define <vscale x 2 x float> @intrinsic_vleff_v_nxv2f32_nxv2f32(<vscale x 2 x float>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vle32ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x float>, i32 } @llvm.riscv.vleff.nxv2f32(
- <vscale x 2 x float> undef,
- <vscale x 2 x float>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 2 x float>, i32 } %a, 0
- %c = extractvalue { <vscale x 2 x float>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 2 x float> %b
-}
-
-declare { <vscale x 2 x float>, i32 } @llvm.riscv.vleff.mask.nxv2f32(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x float> @intrinsic_vleff_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x float>, i32 } @llvm.riscv.vleff.mask.nxv2f32(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 2 x float>, i32 } %a, 0
- %c = extractvalue { <vscale x 2 x float>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 2 x float> %b
-}
-
-declare { <vscale x 4 x float>, i32 } @llvm.riscv.vleff.nxv4f32(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- i32);
-
-define <vscale x 4 x float> @intrinsic_vleff_v_nxv4f32_nxv4f32(<vscale x 4 x float>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vle32ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x float>, i32 } @llvm.riscv.vleff.nxv4f32(
- <vscale x 4 x float> undef,
- <vscale x 4 x float>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 4 x float>, i32 } %a, 0
- %c = extractvalue { <vscale x 4 x float>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 4 x float> %b
-}
-
-declare { <vscale x 4 x float>, i32 } @llvm.riscv.vleff.mask.nxv4f32(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x float> @intrinsic_vleff_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x float>, i32 } @llvm.riscv.vleff.mask.nxv4f32(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 4 x float>, i32 } %a, 0
- %c = extractvalue { <vscale x 4 x float>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 4 x float> %b
-}
-
-declare { <vscale x 8 x float>, i32 } @llvm.riscv.vleff.nxv8f32(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- i32);
-
-define <vscale x 8 x float> @intrinsic_vleff_v_nxv8f32_nxv8f32(<vscale x 8 x float>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vle32ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x float>, i32 } @llvm.riscv.vleff.nxv8f32(
- <vscale x 8 x float> undef,
- <vscale x 8 x float>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 8 x float>, i32 } %a, 0
- %c = extractvalue { <vscale x 8 x float>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 8 x float> %b
-}
-
-declare { <vscale x 8 x float>, i32 } @llvm.riscv.vleff.mask.nxv8f32(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x float> @intrinsic_vleff_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x float>, i32 } @llvm.riscv.vleff.mask.nxv8f32(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 8 x float>, i32 } %a, 0
- %c = extractvalue { <vscale x 8 x float>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 8 x float> %b
-}
-
-declare { <vscale x 16 x float>, i32 } @llvm.riscv.vleff.nxv16f32(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- i32);
-
-define <vscale x 16 x float> @intrinsic_vleff_v_nxv16f32_nxv16f32(<vscale x 16 x float>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vle32ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 16 x float>, i32 } @llvm.riscv.vleff.nxv16f32(
- <vscale x 16 x float> undef,
- <vscale x 16 x float>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 16 x float>, i32 } %a, 0
- %c = extractvalue { <vscale x 16 x float>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 16 x float> %b
-}
-
-declare { <vscale x 16 x float>, i32 } @llvm.riscv.vleff.mask.nxv16f32(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x float> @intrinsic_vleff_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 16 x float>, i32 } @llvm.riscv.vleff.mask.nxv16f32(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 16 x float>, i32 } %a, 0
- %c = extractvalue { <vscale x 16 x float>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 16 x float> %b
-}
-
-declare { <vscale x 1 x i16>, i32 } @llvm.riscv.vleff.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vleff_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x i16>, i32 } @llvm.riscv.vleff.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 1 x i16>, i32 } %a, 0
- %c = extractvalue { <vscale x 1 x i16>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 1 x i16> %b
-}
-
-declare { <vscale x 1 x i16>, i32 } @llvm.riscv.vleff.mask.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vleff_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x i16>, i32 } @llvm.riscv.vleff.mask.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 1 x i16>, i32 } %a, 0
- %c = extractvalue { <vscale x 1 x i16>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 1 x i16> %b
-}
-
-declare { <vscale x 2 x i16>, i32 } @llvm.riscv.vleff.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vleff_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x i16>, i32 } @llvm.riscv.vleff.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 2 x i16>, i32 } %a, 0
- %c = extractvalue { <vscale x 2 x i16>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 2 x i16> %b
-}
-
-declare { <vscale x 2 x i16>, i32 } @llvm.riscv.vleff.mask.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vleff_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x i16>, i32 } @llvm.riscv.vleff.mask.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 2 x i16>, i32 } %a, 0
- %c = extractvalue { <vscale x 2 x i16>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 2 x i16> %b
-}
-
-declare { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vleff_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 4 x i16>, i32 } %a, 0
- %c = extractvalue { <vscale x 4 x i16>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 4 x i16> %b
-}
-
-declare { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vleff_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 4 x i16>, i32 } %a, 0
- %c = extractvalue { <vscale x 4 x i16>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 4 x i16> %b
-}
-
-declare { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vleff_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 8 x i16>, i32 } %a, 0
- %c = extractvalue { <vscale x 8 x i16>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 8 x i16> %b
-}
-
-declare { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vleff_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 8 x i16>, i32 } %a, 0
- %c = extractvalue { <vscale x 8 x i16>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 8 x i16> %b
-}
-
-declare { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vleff_v_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 16 x i16>, i32 } %a, 0
- %c = extractvalue { <vscale x 16 x i16>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 16 x i16> %b
-}
-
-declare { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vleff_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 16 x i16>, i32 } %a, 0
- %c = extractvalue { <vscale x 16 x i16>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 16 x i16> %b
-}
-
-declare { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vleff_v_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 32 x i16>, i32 } %a, 0
- %c = extractvalue { <vscale x 32 x i16>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 32 x i16> %b
-}
-
-declare { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vleff_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16>* %1,
- <vscale x 32 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 32 x i16>, i32 } %a, 0
- %c = extractvalue { <vscale x 32 x i16>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 32 x i16> %b
-}
-
-declare { <vscale x 1 x half>, i32 } @llvm.riscv.vleff.nxv1f16(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- i32);
-
-define <vscale x 1 x half> @intrinsic_vleff_v_nxv1half_nxv1f16(<vscale x 1 x half>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1half_nxv1f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x half>, i32 } @llvm.riscv.vleff.nxv1f16(
- <vscale x 1 x half> undef,
- <vscale x 1 x half>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 1 x half>, i32 } %a, 0
- %c = extractvalue { <vscale x 1 x half>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 1 x half> %b
-}
-
-declare { <vscale x 1 x half>, i32 } @llvm.riscv.vleff.mask.nxv1f16(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x half> @intrinsic_vleff_mask_v_nxv1half_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x half>, i32 } @llvm.riscv.vleff.mask.nxv1f16(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 1 x half>, i32 } %a, 0
- %c = extractvalue { <vscale x 1 x half>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 1 x half> %b
-}
-
-declare { <vscale x 2 x half>, i32 } @llvm.riscv.vleff.nxv2f16(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- i32);
-
-define <vscale x 2 x half> @intrinsic_vleff_v_nxv2half_nxv2f16(<vscale x 2 x half>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2half_nxv2f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x half>, i32 } @llvm.riscv.vleff.nxv2f16(
- <vscale x 2 x half> undef,
- <vscale x 2 x half>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 2 x half>, i32 } %a, 0
- %c = extractvalue { <vscale x 2 x half>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 2 x half> %b
-}
-
-declare { <vscale x 2 x half>, i32 } @llvm.riscv.vleff.mask.nxv2f16(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x half> @intrinsic_vleff_mask_v_nxv2half_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x half>, i32 } @llvm.riscv.vleff.mask.nxv2f16(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 2 x half>, i32 } %a, 0
- %c = extractvalue { <vscale x 2 x half>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 2 x half> %b
-}
-
-declare { <vscale x 4 x half>, i32 } @llvm.riscv.vleff.nxv4f16(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- i32);
-
-define <vscale x 4 x half> @intrinsic_vleff_v_nxv4half_nxv4f16(<vscale x 4 x half>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4half_nxv4f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x half>, i32 } @llvm.riscv.vleff.nxv4f16(
- <vscale x 4 x half> undef,
- <vscale x 4 x half>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 4 x half>, i32 } %a, 0
- %c = extractvalue { <vscale x 4 x half>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 4 x half> %b
-}
-
-declare { <vscale x 4 x half>, i32 } @llvm.riscv.vleff.mask.nxv4f16(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x half> @intrinsic_vleff_mask_v_nxv4half_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x half>, i32 } @llvm.riscv.vleff.mask.nxv4f16(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 4 x half>, i32 } %a, 0
- %c = extractvalue { <vscale x 4 x half>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 4 x half> %b
-}
-
-declare { <vscale x 8 x half>, i32 } @llvm.riscv.vleff.nxv8f16(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- i32);
-
-define <vscale x 8 x half> @intrinsic_vleff_v_nxv8half_nxv8f16(<vscale x 8 x half>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8half_nxv8f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x half>, i32 } @llvm.riscv.vleff.nxv8f16(
- <vscale x 8 x half> undef,
- <vscale x 8 x half>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 8 x half>, i32 } %a, 0
- %c = extractvalue { <vscale x 8 x half>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 8 x half> %b
-}
-
-declare { <vscale x 8 x half>, i32 } @llvm.riscv.vleff.mask.nxv8f16(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x half> @intrinsic_vleff_mask_v_nxv8half_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x half>, i32 } @llvm.riscv.vleff.mask.nxv8f16(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 8 x half>, i32 } %a, 0
- %c = extractvalue { <vscale x 8 x half>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 8 x half> %b
-}
-
-declare { <vscale x 16 x half>, i32 } @llvm.riscv.vleff.nxv16f16(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- i32);
-
-define <vscale x 16 x half> @intrinsic_vleff_v_nxv16half_nxv16f16(<vscale x 16 x half>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv16half_nxv16f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 16 x half>, i32 } @llvm.riscv.vleff.nxv16f16(
- <vscale x 16 x half> undef,
- <vscale x 16 x half>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 16 x half>, i32 } %a, 0
- %c = extractvalue { <vscale x 16 x half>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 16 x half> %b
-}
-
-declare { <vscale x 16 x half>, i32 } @llvm.riscv.vleff.mask.nxv16f16(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x half> @intrinsic_vleff_mask_v_nxv16half_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 16 x half>, i32 } @llvm.riscv.vleff.mask.nxv16f16(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 16 x half>, i32 } %a, 0
- %c = extractvalue { <vscale x 16 x half>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 16 x half> %b
-}
-
-declare { <vscale x 32 x half>, i32 } @llvm.riscv.vleff.nxv32f16(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- i32);
-
-define <vscale x 32 x half> @intrinsic_vleff_v_nxv32half_nxv32f16(<vscale x 32 x half>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv32half_nxv32f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 32 x half>, i32 } @llvm.riscv.vleff.nxv32f16(
- <vscale x 32 x half> undef,
- <vscale x 32 x half>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 32 x half>, i32 } %a, 0
- %c = extractvalue { <vscale x 32 x half>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 32 x half> %b
-}
-
-declare { <vscale x 32 x half>, i32 } @llvm.riscv.vleff.mask.nxv32f16(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x half> @intrinsic_vleff_mask_v_nxv32half_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 32 x half>, i32 } @llvm.riscv.vleff.mask.nxv32f16(
- <vscale x 32 x half> %0,
- <vscale x 32 x half>* %1,
- <vscale x 32 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 32 x half>, i32 } %a, 0
- %c = extractvalue { <vscale x 32 x half>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 32 x half> %b
-}
-
-declare { <vscale x 1 x i8>, i32 } @llvm.riscv.vleff.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vleff_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vle8ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x i8>, i32 } @llvm.riscv.vleff.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 1 x i8>, i32 } %a, 0
- %c = extractvalue { <vscale x 1 x i8>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 1 x i8> %b
-}
-
-declare { <vscale x 1 x i8>, i32 } @llvm.riscv.vleff.mask.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vleff_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x i8>, i32 } @llvm.riscv.vleff.mask.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 1 x i8>, i32 } %a, 0
- %c = extractvalue { <vscale x 1 x i8>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 1 x i8> %b
-}
-
-declare { <vscale x 2 x i8>, i32 } @llvm.riscv.vleff.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vleff_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vle8ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x i8>, i32 } @llvm.riscv.vleff.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 2 x i8>, i32 } %a, 0
- %c = extractvalue { <vscale x 2 x i8>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 2 x i8> %b
-}
-
-declare { <vscale x 2 x i8>, i32 } @llvm.riscv.vleff.mask.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vleff_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x i8>, i32 } @llvm.riscv.vleff.mask.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 2 x i8>, i32 } %a, 0
- %c = extractvalue { <vscale x 2 x i8>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 2 x i8> %b
-}
-
-declare { <vscale x 4 x i8>, i32 } @llvm.riscv.vleff.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vleff_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vle8ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x i8>, i32 } @llvm.riscv.vleff.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 4 x i8>, i32 } %a, 0
- %c = extractvalue { <vscale x 4 x i8>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 4 x i8> %b
-}
-
-declare { <vscale x 4 x i8>, i32 } @llvm.riscv.vleff.mask.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vleff_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x i8>, i32 } @llvm.riscv.vleff.mask.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 4 x i8>, i32 } %a, 0
- %c = extractvalue { <vscale x 4 x i8>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 4 x i8> %b
-}
-
-declare { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vleff_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vle8ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 8 x i8>, i32 } %a, 0
- %c = extractvalue { <vscale x 8 x i8>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 8 x i8> %b
-}
-
-declare { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vleff_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 8 x i8>, i32 } %a, 0
- %c = extractvalue { <vscale x 8 x i8>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 8 x i8> %b
-}
-
-declare { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vleff_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vle8ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 16 x i8>, i32 } %a, 0
- %c = extractvalue { <vscale x 16 x i8>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 16 x i8> %b
-}
-
-declare { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vleff_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 16 x i8>, i32 } %a, 0
- %c = extractvalue { <vscale x 16 x i8>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 16 x i8> %b
-}
-
-declare { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vleff_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vle8ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 32 x i8>, i32 } %a, 0
- %c = extractvalue { <vscale x 32 x i8>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 32 x i8> %b
-}
-
-declare { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vleff_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8>* %1,
- <vscale x 32 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 32 x i8>, i32 } %a, 0
- %c = extractvalue { <vscale x 32 x i8>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 32 x i8> %b
-}
-
-declare { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>*,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vleff_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vle8ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 64 x i8>, i32 } %a, 0
- %c = extractvalue { <vscale x 64 x i8>, i32 } %a, 1
- store i32 %c, i32* %2
- ret <vscale x 64 x i8> %b
-}
-
-declare { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>*,
- <vscale x 64 x i1>,
- i32,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vleff_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8>* %1,
- <vscale x 64 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 64 x i8>, i32 } %a, 0
- %c = extractvalue { <vscale x 64 x i8>, i32 } %a, 1
- store i32 %c, i32* %4
-
- ret <vscale x 64 x i8> %b
-}
-
-; Test with the VL output unused
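-; With the vl result dead, the "csrr a0, vl" readback is omitted; only the
-; load itself remains.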
-define <vscale x 1 x double> @intrinsic_vleff_dead_vl(<vscale x 1 x double>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_dead_vl:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vle64ff.v v8, (a0)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.nxv1f64(
- <vscale x 1 x double> undef,
- <vscale x 1 x double>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 1 x double>, i32 } %a, 0
- ret <vscale x 1 x double> %b
-}
-
-define <vscale x 1 x double> @intrinsic_vleff_mask_dead_vl(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_dead_vl:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.mask.nxv1f64(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 1 x double>, i32 } %a, 0
-
- ret <vscale x 1 x double> %b
-}
-
-; Test with the loaded value unused
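-; The vle64ff.v is still emitted because its vl result is live; only the vector destination is dead.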
-define void @intrinsic_vleff_dead_value(<vscale x 1 x double>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_dead_value:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vle64ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.nxv1f64(
- <vscale x 1 x double> undef,
- <vscale x 1 x double>* %0,
- i32 %1)
- %b = extractvalue { <vscale x 1 x double>, i32 } %a, 1
- store i32 %b, i32* %2
- ret void
-}
-
-define void @intrinsic_vleff_mask_dead_value(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_dead_value:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.mask.nxv1f64(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
- %b = extractvalue { <vscale x 1 x double>, i32 } %a, 1
- store i32 %b, i32* %4
-
- ret void
-}
-
-; Test with both outputs dead. Make sure the vleff isn't deleted.
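-; Fault-only-first loads update the vl CSR as a side effect, so the instruction must survive dead-code elimination even when neither result is used.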
-define void @intrinsic_vleff_dead_all(<vscale x 1 x double>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_dead_all:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vle64ff.v v8, (a0)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.nxv1f64(
- <vscale x 1 x double> undef,
- <vscale x 1 x double>* %0,
- i32 %1)
- ret void
-}
-
-define void @intrinsic_vleff_mask_dead_all(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_dead_all:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.mask.nxv1f64(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
-
- ret void
-}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+experimental-zvfh,+f -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
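-; Same coverage as the riscv32 variant above, but with i64 vl results, so the stored vl uses sd rather than sw.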
-declare { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vleff_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vle64ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 1 x i64>, i64 } %a, 0
- %c = extractvalue { <vscale x 1 x i64>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 1 x i64> %b
-}
-
-declare { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.mask.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.mask.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64>* %1,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 1 x i64>, i64 } %a, 0
- %c = extractvalue { <vscale x 1 x i64>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 1 x i64> %b
-}
-
-declare { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vleff_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vle64ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 2 x i64>, i64 } %a, 0
- %c = extractvalue { <vscale x 2 x i64>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 2 x i64> %b
-}
-
-declare { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.mask.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vleff_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.mask.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64>* %1,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 2 x i64>, i64 } %a, 0
- %c = extractvalue { <vscale x 2 x i64>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 2 x i64> %b
-}
-
-declare { <vscale x 4 x i64>, i64 } @llvm.riscv.vleff.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vleff_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vle64ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x i64>, i64 } @llvm.riscv.vleff.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 4 x i64>, i64 } %a, 0
- %c = extractvalue { <vscale x 4 x i64>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 4 x i64> %b
-}
-
-declare { <vscale x 4 x i64>, i64 } @llvm.riscv.vleff.mask.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vleff_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x i64>, i64 } @llvm.riscv.vleff.mask.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64>* %1,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 4 x i64>, i64 } %a, 0
- %c = extractvalue { <vscale x 4 x i64>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 4 x i64> %b
-}
-
-declare { <vscale x 8 x i64>, i64 } @llvm.riscv.vleff.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vleff_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vle64ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x i64>, i64 } @llvm.riscv.vleff.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 8 x i64>, i64 } %a, 0
- %c = extractvalue { <vscale x 8 x i64>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 8 x i64> %b
-}
-
-declare { <vscale x 8 x i64>, i64 } @llvm.riscv.vleff.mask.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vleff_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x i64>, i64 } @llvm.riscv.vleff.mask.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64>* %1,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 8 x i64>, i64 } %a, 0
- %c = extractvalue { <vscale x 8 x i64>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 8 x i64> %b
-}
-
-declare { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.nxv1f64(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- i64);
-
-define <vscale x 1 x double> @intrinsic_vleff_v_nxv1f64_nxv1f64(<vscale x 1 x double>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vle64ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.nxv1f64(
- <vscale x 1 x double> undef,
- <vscale x 1 x double>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 1 x double>, i64 } %a, 0
- %c = extractvalue { <vscale x 1 x double>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 1 x double> %b
-}
-
-declare { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.mask.nxv1f64(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x double> @intrinsic_vleff_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.mask.nxv1f64(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 1 x double>, i64 } %a, 0
- %c = extractvalue { <vscale x 1 x double>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 1 x double> %b
-}
-
-declare { <vscale x 2 x double>, i64 } @llvm.riscv.vleff.nxv2f64(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- i64);
-
-define <vscale x 2 x double> @intrinsic_vleff_v_nxv2f64_nxv2f64(<vscale x 2 x double>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vle64ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x double>, i64 } @llvm.riscv.vleff.nxv2f64(
- <vscale x 2 x double> undef,
- <vscale x 2 x double>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 2 x double>, i64 } %a, 0
- %c = extractvalue { <vscale x 2 x double>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 2 x double> %b
-}
-
-declare { <vscale x 2 x double>, i64 } @llvm.riscv.vleff.mask.nxv2f64(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x double> @intrinsic_vleff_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x double>, i64 } @llvm.riscv.vleff.mask.nxv2f64(
- <vscale x 2 x double> %0,
- <vscale x 2 x double>* %1,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 2 x double>, i64 } %a, 0
- %c = extractvalue { <vscale x 2 x double>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 2 x double> %b
-}
-
-declare { <vscale x 4 x double>, i64 } @llvm.riscv.vleff.nxv4f64(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- i64);
-
-define <vscale x 4 x double> @intrinsic_vleff_v_nxv4f64_nxv4f64(<vscale x 4 x double>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vle64ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x double>, i64 } @llvm.riscv.vleff.nxv4f64(
- <vscale x 4 x double> undef,
- <vscale x 4 x double>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 4 x double>, i64 } %a, 0
- %c = extractvalue { <vscale x 4 x double>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 4 x double> %b
-}
-
-declare { <vscale x 4 x double>, i64 } @llvm.riscv.vleff.mask.nxv4f64(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x double> @intrinsic_vleff_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x double>, i64 } @llvm.riscv.vleff.mask.nxv4f64(
- <vscale x 4 x double> %0,
- <vscale x 4 x double>* %1,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 4 x double>, i64 } %a, 0
- %c = extractvalue { <vscale x 4 x double>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 4 x double> %b
-}
-
-declare { <vscale x 8 x double>, i64 } @llvm.riscv.vleff.nxv8f64(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- i64);
-
-define <vscale x 8 x double> @intrinsic_vleff_v_nxv8f64_nxv8f64(<vscale x 8 x double>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vle64ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x double>, i64 } @llvm.riscv.vleff.nxv8f64(
- <vscale x 8 x double> undef,
- <vscale x 8 x double>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 8 x double>, i64 } %a, 0
- %c = extractvalue { <vscale x 8 x double>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 8 x double> %b
-}
-
-declare { <vscale x 8 x double>, i64 } @llvm.riscv.vleff.mask.nxv8f64(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x double> @intrinsic_vleff_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x double>, i64 } @llvm.riscv.vleff.mask.nxv8f64(
- <vscale x 8 x double> %0,
- <vscale x 8 x double>* %1,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 8 x double>, i64 } %a, 0
- %c = extractvalue { <vscale x 8 x double>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 8 x double> %b
-}
-
-declare { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vleff_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vle32ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 1 x i32>, i64 } %a, 0
- %c = extractvalue { <vscale x 1 x i32>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 1 x i32> %b
-}
-
-declare { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.mask.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vleff_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.mask.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 1 x i32>, i64 } %a, 0
- %c = extractvalue { <vscale x 1 x i32>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 1 x i32> %b
-}
-
-declare { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vleff_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vle32ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 2 x i32>, i64 } %a, 0
- %c = extractvalue { <vscale x 2 x i32>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 2 x i32> %b
-}
-
-declare { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.mask.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vleff_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.mask.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 2 x i32>, i64 } %a, 0
- %c = extractvalue { <vscale x 2 x i32>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 2 x i32> %b
-}
-
-declare { <vscale x 4 x i32>, i64 } @llvm.riscv.vleff.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vleff_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vle32ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x i32>, i64 } @llvm.riscv.vleff.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 4 x i32>, i64 } %a, 0
- %c = extractvalue { <vscale x 4 x i32>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 4 x i32> %b
-}
-
-declare { <vscale x 4 x i32>, i64 } @llvm.riscv.vleff.mask.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vleff_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x i32>, i64 } @llvm.riscv.vleff.mask.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 4 x i32>, i64 } %a, 0
- %c = extractvalue { <vscale x 4 x i32>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 4 x i32> %b
-}
-
-declare { <vscale x 8 x i32>, i64 } @llvm.riscv.vleff.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vleff_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vle32ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x i32>, i64 } @llvm.riscv.vleff.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 8 x i32>, i64 } %a, 0
- %c = extractvalue { <vscale x 8 x i32>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 8 x i32> %b
-}
-
-declare { <vscale x 8 x i32>, i64 } @llvm.riscv.vleff.mask.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vleff_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x i32>, i64 } @llvm.riscv.vleff.mask.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 8 x i32>, i64 } %a, 0
- %c = extractvalue { <vscale x 8 x i32>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 8 x i32> %b
-}
-
-declare { <vscale x 16 x i32>, i64 } @llvm.riscv.vleff.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vleff_v_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vle32ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 16 x i32>, i64 } @llvm.riscv.vleff.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 16 x i32>, i64 } %a, 0
- %c = extractvalue { <vscale x 16 x i32>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 16 x i32> %b
-}
-
-declare { <vscale x 16 x i32>, i64 } @llvm.riscv.vleff.mask.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vleff_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 16 x i32>, i64 } @llvm.riscv.vleff.mask.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 16 x i32>, i64 } %a, 0
- %c = extractvalue { <vscale x 16 x i32>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 16 x i32> %b
-}
-
-declare { <vscale x 1 x float>, i64 } @llvm.riscv.vleff.nxv1f32(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- i64);
-
-define <vscale x 1 x float> @intrinsic_vleff_v_nxv1f32_nxv1f32(<vscale x 1 x float>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vle32ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x float>, i64 } @llvm.riscv.vleff.nxv1f32(
- <vscale x 1 x float> undef,
- <vscale x 1 x float>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 1 x float>, i64 } %a, 0
- %c = extractvalue { <vscale x 1 x float>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 1 x float> %b
-}
-
-declare { <vscale x 1 x float>, i64 } @llvm.riscv.vleff.mask.nxv1f32(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x float> @intrinsic_vleff_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x float>, i64 } @llvm.riscv.vleff.mask.nxv1f32(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 1 x float>, i64 } %a, 0
- %c = extractvalue { <vscale x 1 x float>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 1 x float> %b
-}
-
-declare { <vscale x 2 x float>, i64 } @llvm.riscv.vleff.nxv2f32(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- i64);
-
-define <vscale x 2 x float> @intrinsic_vleff_v_nxv2f32_nxv2f32(<vscale x 2 x float>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vle32ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x float>, i64 } @llvm.riscv.vleff.nxv2f32(
- <vscale x 2 x float> undef,
- <vscale x 2 x float>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 2 x float>, i64 } %a, 0
- %c = extractvalue { <vscale x 2 x float>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 2 x float> %b
-}
-
-declare { <vscale x 2 x float>, i64 } @llvm.riscv.vleff.mask.nxv2f32(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x float> @intrinsic_vleff_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x float>, i64 } @llvm.riscv.vleff.mask.nxv2f32(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 2 x float>, i64 } %a, 0
- %c = extractvalue { <vscale x 2 x float>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 2 x float> %b
-}
-
-declare { <vscale x 4 x float>, i64 } @llvm.riscv.vleff.nxv4f32(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- i64);
-
-define <vscale x 4 x float> @intrinsic_vleff_v_nxv4f32_nxv4f32(<vscale x 4 x float>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vle32ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x float>, i64 } @llvm.riscv.vleff.nxv4f32(
- <vscale x 4 x float> undef,
- <vscale x 4 x float>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 4 x float>, i64 } %a, 0
- %c = extractvalue { <vscale x 4 x float>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 4 x float> %b
-}
-
-declare { <vscale x 4 x float>, i64 } @llvm.riscv.vleff.mask.nxv4f32(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x float> @intrinsic_vleff_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x float>, i64 } @llvm.riscv.vleff.mask.nxv4f32(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 4 x float>, i64 } %a, 0
- %c = extractvalue { <vscale x 4 x float>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 4 x float> %b
-}
-
-declare { <vscale x 8 x float>, i64 } @llvm.riscv.vleff.nxv8f32(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- i64);
-
-define <vscale x 8 x float> @intrinsic_vleff_v_nxv8f32_nxv8f32(<vscale x 8 x float>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vle32ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x float>, i64 } @llvm.riscv.vleff.nxv8f32(
- <vscale x 8 x float> undef,
- <vscale x 8 x float>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 8 x float>, i64 } %a, 0
- %c = extractvalue { <vscale x 8 x float>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 8 x float> %b
-}
-
-declare { <vscale x 8 x float>, i64 } @llvm.riscv.vleff.mask.nxv8f32(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x float> @intrinsic_vleff_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x float>, i64 } @llvm.riscv.vleff.mask.nxv8f32(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 8 x float>, i64 } %a, 0
- %c = extractvalue { <vscale x 8 x float>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 8 x float> %b
-}
-
-declare { <vscale x 16 x float>, i64 } @llvm.riscv.vleff.nxv16f32(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- i64);
-
-define <vscale x 16 x float> @intrinsic_vleff_v_nxv16f32_nxv16f32(<vscale x 16 x float>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vle32ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 16 x float>, i64 } @llvm.riscv.vleff.nxv16f32(
- <vscale x 16 x float> undef,
- <vscale x 16 x float>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 16 x float>, i64 } %a, 0
- %c = extractvalue { <vscale x 16 x float>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 16 x float> %b
-}
-
-declare { <vscale x 16 x float>, i64 } @llvm.riscv.vleff.mask.nxv16f32(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x float> @intrinsic_vleff_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 16 x float>, i64 } @llvm.riscv.vleff.mask.nxv16f32(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 16 x float>, i64 } %a, 0
- %c = extractvalue { <vscale x 16 x float>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 16 x float> %b
-}
-
-declare { <vscale x 1 x i16>, i64 } @llvm.riscv.vleff.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vleff_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x i16>, i64 } @llvm.riscv.vleff.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 1 x i16>, i64 } %a, 0
- %c = extractvalue { <vscale x 1 x i16>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 1 x i16> %b
-}
-
-declare { <vscale x 1 x i16>, i64 } @llvm.riscv.vleff.mask.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vleff_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x i16>, i64 } @llvm.riscv.vleff.mask.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 1 x i16>, i64 } %a, 0
- %c = extractvalue { <vscale x 1 x i16>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 1 x i16> %b
-}
-
-declare { <vscale x 2 x i16>, i64 } @llvm.riscv.vleff.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vleff_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x i16>, i64 } @llvm.riscv.vleff.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 2 x i16>, i64 } %a, 0
- %c = extractvalue { <vscale x 2 x i16>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 2 x i16> %b
-}
-
-declare { <vscale x 2 x i16>, i64 } @llvm.riscv.vleff.mask.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vleff_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x i16>, i64 } @llvm.riscv.vleff.mask.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 2 x i16>, i64 } %a, 0
- %c = extractvalue { <vscale x 2 x i16>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 2 x i16> %b
-}
-
-declare { <vscale x 4 x i16>, i64 } @llvm.riscv.vleff.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vleff_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x i16>, i64 } @llvm.riscv.vleff.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 4 x i16>, i64 } %a, 0
- %c = extractvalue { <vscale x 4 x i16>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 4 x i16> %b
-}
-
-declare { <vscale x 4 x i16>, i64 } @llvm.riscv.vleff.mask.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vleff_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x i16>, i64 } @llvm.riscv.vleff.mask.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 4 x i16>, i64 } %a, 0
- %c = extractvalue { <vscale x 4 x i16>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 4 x i16> %b
-}
-
-declare { <vscale x 8 x i16>, i64 } @llvm.riscv.vleff.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vleff_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x i16>, i64 } @llvm.riscv.vleff.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 8 x i16>, i64 } %a, 0
- %c = extractvalue { <vscale x 8 x i16>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 8 x i16> %b
-}
-
-declare { <vscale x 8 x i16>, i64 } @llvm.riscv.vleff.mask.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vleff_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x i16>, i64 } @llvm.riscv.vleff.mask.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 8 x i16>, i64 } %a, 0
- %c = extractvalue { <vscale x 8 x i16>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 8 x i16> %b
-}
-
-declare { <vscale x 16 x i16>, i64 } @llvm.riscv.vleff.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vleff_v_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 16 x i16>, i64 } @llvm.riscv.vleff.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 16 x i16>, i64 } %a, 0
- %c = extractvalue { <vscale x 16 x i16>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 16 x i16> %b
-}
-
-declare { <vscale x 16 x i16>, i64 } @llvm.riscv.vleff.mask.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vleff_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 16 x i16>, i64 } @llvm.riscv.vleff.mask.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 16 x i16>, i64 } %a, 0
- %c = extractvalue { <vscale x 16 x i16>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 16 x i16> %b
-}
-
-declare { <vscale x 32 x i16>, i64 } @llvm.riscv.vleff.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vleff_v_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 32 x i16>, i64 } @llvm.riscv.vleff.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 32 x i16>, i64 } %a, 0
- %c = extractvalue { <vscale x 32 x i16>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 32 x i16> %b
-}
-
-declare { <vscale x 32 x i16>, i64 } @llvm.riscv.vleff.mask.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vleff_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 32 x i16>, i64 } @llvm.riscv.vleff.mask.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16>* %1,
- <vscale x 32 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 32 x i16>, i64 } %a, 0
- %c = extractvalue { <vscale x 32 x i16>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 32 x i16> %b
-}
-
-declare { <vscale x 1 x half>, i64 } @llvm.riscv.vleff.nxv1f16(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- i64);
-
-define <vscale x 1 x half> @intrinsic_vleff_v_nxv1half_nxv1f16(<vscale x 1 x half>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1half_nxv1f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x half>, i64 } @llvm.riscv.vleff.nxv1f16(
- <vscale x 1 x half> undef,
- <vscale x 1 x half>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 1 x half>, i64 } %a, 0
- %c = extractvalue { <vscale x 1 x half>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 1 x half> %b
-}
-
-declare { <vscale x 1 x half>, i64 } @llvm.riscv.vleff.mask.nxv1f16(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x half> @intrinsic_vleff_mask_v_nxv1half_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x half>, i64 } @llvm.riscv.vleff.mask.nxv1f16(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 1 x half>, i64 } %a, 0
- %c = extractvalue { <vscale x 1 x half>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 1 x half> %b
-}
-
-declare { <vscale x 2 x half>, i64 } @llvm.riscv.vleff.nxv2f16(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- i64);
-
-define <vscale x 2 x half> @intrinsic_vleff_v_nxv2half_nxv2f16(<vscale x 2 x half>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2half_nxv2f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x half>, i64 } @llvm.riscv.vleff.nxv2f16(
- <vscale x 2 x half> undef,
- <vscale x 2 x half>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 2 x half>, i64 } %a, 0
- %c = extractvalue { <vscale x 2 x half>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 2 x half> %b
-}
-
-declare { <vscale x 2 x half>, i64 } @llvm.riscv.vleff.mask.nxv2f16(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x half> @intrinsic_vleff_mask_v_nxv2half_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x half>, i64 } @llvm.riscv.vleff.mask.nxv2f16(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 2 x half>, i64 } %a, 0
- %c = extractvalue { <vscale x 2 x half>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 2 x half> %b
-}
-
-declare { <vscale x 4 x half>, i64 } @llvm.riscv.vleff.nxv4f16(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- i64);
-
-define <vscale x 4 x half> @intrinsic_vleff_v_nxv4half_nxv4f16(<vscale x 4 x half>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4half_nxv4f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x half>, i64 } @llvm.riscv.vleff.nxv4f16(
- <vscale x 4 x half> undef,
- <vscale x 4 x half>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 4 x half>, i64 } %a, 0
- %c = extractvalue { <vscale x 4 x half>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 4 x half> %b
-}
-
-declare { <vscale x 4 x half>, i64 } @llvm.riscv.vleff.mask.nxv4f16(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x half> @intrinsic_vleff_mask_v_nxv4half_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x half>, i64 } @llvm.riscv.vleff.mask.nxv4f16(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 4 x half>, i64 } %a, 0
- %c = extractvalue { <vscale x 4 x half>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 4 x half> %b
-}
-
-declare { <vscale x 8 x half>, i64 } @llvm.riscv.vleff.nxv8f16(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- i64);
-
-define <vscale x 8 x half> @intrinsic_vleff_v_nxv8half_nxv8f16(<vscale x 8 x half>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8half_nxv8f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x half>, i64 } @llvm.riscv.vleff.nxv8f16(
- <vscale x 8 x half> undef,
- <vscale x 8 x half>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 8 x half>, i64 } %a, 0
- %c = extractvalue { <vscale x 8 x half>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 8 x half> %b
-}
-
-declare { <vscale x 8 x half>, i64 } @llvm.riscv.vleff.mask.nxv8f16(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x half> @intrinsic_vleff_mask_v_nxv8half_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x half>, i64 } @llvm.riscv.vleff.mask.nxv8f16(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 8 x half>, i64 } %a, 0
- %c = extractvalue { <vscale x 8 x half>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 8 x half> %b
-}
-
-declare { <vscale x 16 x half>, i64 } @llvm.riscv.vleff.nxv16f16(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- i64);
-
-define <vscale x 16 x half> @intrinsic_vleff_v_nxv16half_nxv16f16(<vscale x 16 x half>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv16half_nxv16f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 16 x half>, i64 } @llvm.riscv.vleff.nxv16f16(
- <vscale x 16 x half> undef,
- <vscale x 16 x half>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 16 x half>, i64 } %a, 0
- %c = extractvalue { <vscale x 16 x half>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 16 x half> %b
-}
-
-declare { <vscale x 16 x half>, i64 } @llvm.riscv.vleff.mask.nxv16f16(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x half> @intrinsic_vleff_mask_v_nxv16half_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 16 x half>, i64 } @llvm.riscv.vleff.mask.nxv16f16(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 16 x half>, i64 } %a, 0
- %c = extractvalue { <vscale x 16 x half>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 16 x half> %b
-}
-
-declare { <vscale x 32 x half>, i64 } @llvm.riscv.vleff.nxv32f16(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- i64);
-
-define <vscale x 32 x half> @intrinsic_vleff_v_nxv32half_nxv32f16(<vscale x 32 x half>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv32half_nxv32f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vle16ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 32 x half>, i64 } @llvm.riscv.vleff.nxv32f16(
- <vscale x 32 x half> undef,
- <vscale x 32 x half>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 32 x half>, i64 } %a, 0
- %c = extractvalue { <vscale x 32 x half>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 32 x half> %b
-}
-
-declare { <vscale x 32 x half>, i64 } @llvm.riscv.vleff.mask.nxv32f16(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x half> @intrinsic_vleff_mask_v_nxv32half_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32f16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 32 x half>, i64 } @llvm.riscv.vleff.mask.nxv32f16(
- <vscale x 32 x half> %0,
- <vscale x 32 x half>* %1,
- <vscale x 32 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 32 x half>, i64 } %a, 0
- %c = extractvalue { <vscale x 32 x half>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 32 x half> %b
-}
-
-declare { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vleff_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vle8ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 1 x i8>, i64 } %a, 0
- %c = extractvalue { <vscale x 1 x i8>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 1 x i8> %b
-}
-
-declare { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.mask.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vleff_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.mask.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 1 x i8>, i64 } %a, 0
- %c = extractvalue { <vscale x 1 x i8>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 1 x i8> %b
-}
-
-declare { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vleff_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vle8ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 2 x i8>, i64 } %a, 0
- %c = extractvalue { <vscale x 2 x i8>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 2 x i8> %b
-}
-
-declare { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.mask.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vleff_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.mask.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 2 x i8>, i64 } %a, 0
- %c = extractvalue { <vscale x 2 x i8>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 2 x i8> %b
-}
-
-declare { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vleff_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vle8ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 4 x i8>, i64 } %a, 0
- %c = extractvalue { <vscale x 4 x i8>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 4 x i8> %b
-}
-
-declare { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.mask.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vleff_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.mask.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 4 x i8>, i64 } %a, 0
- %c = extractvalue { <vscale x 4 x i8>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 4 x i8> %b
-}
-
-declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vleff_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vle8ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 8 x i8>, i64 } %a, 0
- %c = extractvalue { <vscale x 8 x i8>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 8 x i8> %b
-}
-
-declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vleff_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 8 x i8>, i64 } %a, 0
- %c = extractvalue { <vscale x 8 x i8>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 8 x i8> %b
-}
-
-declare { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vleff_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vle8ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 16 x i8>, i64 } %a, 0
- %c = extractvalue { <vscale x 16 x i8>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 16 x i8> %b
-}
-
-declare { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.mask.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vleff_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.mask.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 16 x i8>, i64 } %a, 0
- %c = extractvalue { <vscale x 16 x i8>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 16 x i8> %b
-}
-
-declare { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vleff_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vle8ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 32 x i8>, i64 } %a, 0
- %c = extractvalue { <vscale x 32 x i8>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 32 x i8> %b
-}
-
-declare { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.mask.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vleff_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.mask.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8>* %1,
- <vscale x 32 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 32 x i8>, i64 } %a, 0
- %c = extractvalue { <vscale x 32 x i8>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 32 x i8> %b
-}
-
-declare { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>*,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vleff_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vle8ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 64 x i8>, i64 } %a, 0
- %c = extractvalue { <vscale x 64 x i8>, i64 } %a, 1
- store i64 %c, i64* %2
- ret <vscale x 64 x i8> %b
-}
-
-declare { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.mask.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>*,
- <vscale x 64 x i1>,
- i64,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vleff_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.mask.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8>* %1,
- <vscale x 64 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 64 x i8>, i64 } %a, 0
- %c = extractvalue { <vscale x 64 x i8>, i64 } %a, 1
- store i64 %c, i64* %4
-
- ret <vscale x 64 x i8> %b
-}
-
-; Test with the VL output unused. The csrr read of vl should be folded away.
-define <vscale x 1 x double> @intrinsic_vleff_dead_vl(<vscale x 1 x double>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vleff_dead_vl:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vle64ff.v v8, (a0)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.nxv1f64(
- <vscale x 1 x double> undef,
- <vscale x 1 x double>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 1 x double>, i64 } %a, 0
- ret <vscale x 1 x double> %b
-}
-
-define <vscale x 1 x double> @intrinsic_vleff_mask_dead_vl(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_dead_vl:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.mask.nxv1f64(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 1 x double>, i64 } %a, 0
-
- ret <vscale x 1 x double> %b
-}
-
-; Test with the loaded value unused. The load must still execute so that vl is updated.
-define void @intrinsic_vleff_dead_value(<vscale x 1 x double>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_dead_value:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vle64ff.v v8, (a0)
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.nxv1f64(
- <vscale x 1 x double> undef,
- <vscale x 1 x double>* %0,
- i64 %1)
- %b = extractvalue { <vscale x 1 x double>, i64 } %a, 1
- store i64 %b, i64* %2
- ret void
-}
-
-define void @intrinsic_vleff_mask_dead_value(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_dead_value:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT: csrr a0, vl
-; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.mask.nxv1f64(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
- %b = extractvalue { <vscale x 1 x double>, i64 } %a, 1
- store i64 %b, i64* %4
-
- ret void
-}
-
-; Test with both outputs dead. Make sure the vleff isn't deleted.
-define void @intrinsic_vleff_dead_all(<vscale x 1 x double>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vleff_dead_all:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vle64ff.v v8, (a0)
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.nxv1f64(
- <vscale x 1 x double> undef,
- <vscale x 1 x double>* %0,
- i64 %1)
- ret void
-}
-
-define void @intrinsic_vleff_mask_dead_all(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_dead_all:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.mask.nxv1f64(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
-
- ret void
-}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64
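+; iXLen is a placeholder: the sed invocations above rewrite it to i32 or i64 so
+; one test body covers both XLEN widths. vleNff.v is the fault-only-first load:
+; it may stop early at a faulting element, trimming vl to the number of elements
+; actually loaded, which each test reads back with csrr and stores.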
+declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vleff_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV32-NEXT: vle64ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vle64ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.nxv1i64(
+ <vscale x 1 x i64> undef,
+ <vscale x 1 x i64>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 1 x i64> %b
+}
+
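+; The masked variants take a passthru, a mask, and a trailing policy operand.
+; iXLen 1 selects tail-agnostic, mask-undisturbed, which matches the
+; "ta, mu" in the generated vsetvli below.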
+declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV32-NEXT: vle64ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vle64ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 1 x i64> %b
+}
+
+declare { <vscale x 2 x i64>, iXLen } @llvm.riscv.vleff.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vleff_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV32-NEXT: vle64ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vle64ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 2 x i64>, iXLen } @llvm.riscv.vleff.nxv2i64(
+ <vscale x 2 x i64> undef,
+ <vscale x 2 x i64>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 2 x i64>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 2 x i64>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 2 x i64> %b
+}
+
+declare { <vscale x 2 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vleff_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV32-NEXT: vle64ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vle64ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 2 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 2 x i64>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 2 x i64>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 2 x i64> %b
+}
+
+declare { <vscale x 4 x i64>, iXLen } @llvm.riscv.vleff.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vleff_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV32-NEXT: vle64ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vle64ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 4 x i64>, iXLen } @llvm.riscv.vleff.nxv4i64(
+ <vscale x 4 x i64> undef,
+ <vscale x 4 x i64>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 4 x i64>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 4 x i64>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 4 x i64> %b
+}
+
+declare { <vscale x 4 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vleff_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vle64ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vle64ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 4 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 4 x i64>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 4 x i64>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 4 x i64> %b
+}
+
+declare { <vscale x 8 x i64>, iXLen } @llvm.riscv.vleff.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vleff_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vle64ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vle64ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 8 x i64>, iXLen } @llvm.riscv.vleff.nxv8i64(
+ <vscale x 8 x i64> undef,
+ <vscale x 8 x i64>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 8 x i64>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 8 x i64>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 8 x i64> %b
+}
+
+declare { <vscale x 8 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vleff_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vle64ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vle64ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 8 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 8 x i64>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 8 x i64>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 8 x i64> %b
+}
+
+declare { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ iXLen);
+
+define <vscale x 1 x double> @intrinsic_vleff_v_nxv1f64_nxv1f64(<vscale x 1 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV32-NEXT: vle64ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vle64ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
+ <vscale x 1 x double> undef,
+ <vscale x 1 x double>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 1 x double> %b
+}
+
+declare { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x double> @intrinsic_vleff_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV32-NEXT: vle64ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vle64ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 1 x double> %b
+}
+
+declare { <vscale x 2 x double>, iXLen } @llvm.riscv.vleff.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ iXLen);
+
+define <vscale x 2 x double> @intrinsic_vleff_v_nxv2f64_nxv2f64(<vscale x 2 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV32-NEXT: vle64ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vle64ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 2 x double>, iXLen } @llvm.riscv.vleff.nxv2f64(
+ <vscale x 2 x double> undef,
+ <vscale x 2 x double>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 2 x double>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 2 x double>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 2 x double> %b
+}
+
+declare { <vscale x 2 x double>, iXLen } @llvm.riscv.vleff.mask.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x double> @intrinsic_vleff_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV32-NEXT: vle64ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vle64ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 2 x double>, iXLen } @llvm.riscv.vleff.mask.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 2 x double>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 2 x double>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 2 x double> %b
+}
+
+declare { <vscale x 4 x double>, iXLen } @llvm.riscv.vleff.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ iXLen);
+
+define <vscale x 4 x double> @intrinsic_vleff_v_nxv4f64_nxv4f64(<vscale x 4 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV32-NEXT: vle64ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vle64ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 4 x double>, iXLen } @llvm.riscv.vleff.nxv4f64(
+ <vscale x 4 x double> undef,
+ <vscale x 4 x double>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 4 x double>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 4 x double>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 4 x double> %b
+}
+
+declare { <vscale x 4 x double>, iXLen } @llvm.riscv.vleff.mask.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x double> @intrinsic_vleff_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vle64ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vle64ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 4 x double>, iXLen } @llvm.riscv.vleff.mask.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 4 x double>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 4 x double>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 4 x double> %b
+}
+
+declare { <vscale x 8 x double>, iXLen } @llvm.riscv.vleff.nxv8f64(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ iXLen);
+
+define <vscale x 8 x double> @intrinsic_vleff_v_nxv8f64_nxv8f64(<vscale x 8 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vle64ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vle64ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 8 x double>, iXLen } @llvm.riscv.vleff.nxv8f64(
+ <vscale x 8 x double> undef,
+ <vscale x 8 x double>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 8 x double>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 8 x double>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 8 x double> %b
+}
+
+declare { <vscale x 8 x double>, iXLen } @llvm.riscv.vleff.mask.nxv8f64(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x double> @intrinsic_vleff_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vle64ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vle64ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 8 x double>, iXLen } @llvm.riscv.vleff.mask.nxv8f64(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 8 x double>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 8 x double>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 8 x double> %b
+}
+
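+; The EEW=32 groups below walk the LMUL ladder from mf2 up to m8 for both
+; integer and float element types; only the LMUL in the vsetvli changes.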
+declare { <vscale x 1 x i32>, iXLen } @llvm.riscv.vleff.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vleff_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV32-NEXT: vle32ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV64-NEXT: vle32ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x i32>, iXLen } @llvm.riscv.vleff.nxv1i32(
+ <vscale x 1 x i32> undef,
+ <vscale x 1 x i32>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 1 x i32>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 1 x i32>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 1 x i32> %b
+}
+
+declare { <vscale x 1 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vleff_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; RV32-NEXT: vle32ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; RV64-NEXT: vle32ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 1 x i32>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 1 x i32>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 1 x i32> %b
+}
+
+declare { <vscale x 2 x i32>, iXLen } @llvm.riscv.vleff.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vleff_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; RV32-NEXT: vle32ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; RV64-NEXT: vle32ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 2 x i32>, iXLen } @llvm.riscv.vleff.nxv2i32(
+ <vscale x 2 x i32> undef,
+ <vscale x 2 x i32>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 2 x i32>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 2 x i32>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 2 x i32> %b
+}
+
+declare { <vscale x 2 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vleff_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; RV32-NEXT: vle32ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; RV64-NEXT: vle32ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 2 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 2 x i32>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 2 x i32>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 2 x i32> %b
+}
+
+declare { <vscale x 4 x i32>, iXLen } @llvm.riscv.vleff.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vleff_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; RV32-NEXT: vle32ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; RV64-NEXT: vle32ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 4 x i32>, iXLen } @llvm.riscv.vleff.nxv4i32(
+ <vscale x 4 x i32> undef,
+ <vscale x 4 x i32>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 4 x i32>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 4 x i32>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 4 x i32> %b
+}
+
+declare { <vscale x 4 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vleff_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vle32ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vle32ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 4 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 4 x i32>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 4 x i32>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 4 x i32> %b
+}
+
+declare { <vscale x 8 x i32>, iXLen } @llvm.riscv.vleff.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vleff_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV32-NEXT: vle32ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vle32ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 8 x i32>, iXLen } @llvm.riscv.vleff.nxv8i32(
+ <vscale x 8 x i32> undef,
+ <vscale x 8 x i32>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 8 x i32>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 8 x i32>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 8 x i32> %b
+}
+
+declare { <vscale x 8 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vleff_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT: vle32ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT: vle32ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 8 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 8 x i32>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 8 x i32>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 8 x i32> %b
+}
+
+declare { <vscale x 16 x i32>, iXLen } @llvm.riscv.vleff.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vleff_v_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vle32ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT: vle32ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 16 x i32>, iXLen } @llvm.riscv.vleff.nxv16i32(
+ <vscale x 16 x i32> undef,
+ <vscale x 16 x i32>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 16 x i32>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 16 x i32>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 16 x i32> %b
+}
+
+declare { <vscale x 16 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vleff_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; RV32-NEXT: vle32ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; RV64-NEXT: vle32ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 16 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 16 x i32>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 16 x i32>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 16 x i32> %b
+}
+
+declare { <vscale x 1 x float>, iXLen } @llvm.riscv.vleff.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ iXLen);
+
+define <vscale x 1 x float> @intrinsic_vleff_v_nxv1f32_nxv1f32(<vscale x 1 x float>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV32-NEXT: vle32ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV64-NEXT: vle32ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x float>, iXLen } @llvm.riscv.vleff.nxv1f32(
+ <vscale x 1 x float> undef,
+ <vscale x 1 x float>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 1 x float>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 1 x float>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 1 x float> %b
+}
+
+declare { <vscale x 1 x float>, iXLen } @llvm.riscv.vleff.mask.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x float> @intrinsic_vleff_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; RV32-NEXT: vle32ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; RV64-NEXT: vle32ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x float>, iXLen } @llvm.riscv.vleff.mask.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 1 x float>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 1 x float>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 1 x float> %b
+}
+
+declare { <vscale x 2 x float>, iXLen } @llvm.riscv.vleff.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ iXLen);
+
+define <vscale x 2 x float> @intrinsic_vleff_v_nxv2f32_nxv2f32(<vscale x 2 x float>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; RV32-NEXT: vle32ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; RV64-NEXT: vle32ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 2 x float>, iXLen } @llvm.riscv.vleff.nxv2f32(
+ <vscale x 2 x float> undef,
+ <vscale x 2 x float>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 2 x float>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 2 x float>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 2 x float> %b
+}
+
+declare { <vscale x 2 x float>, iXLen } @llvm.riscv.vleff.mask.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x float> @intrinsic_vleff_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; RV32-NEXT: vle32ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; RV64-NEXT: vle32ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 2 x float>, iXLen } @llvm.riscv.vleff.mask.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 2 x float>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 2 x float>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 2 x float> %b
+}
+
+declare { <vscale x 4 x float>, iXLen } @llvm.riscv.vleff.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ iXLen);
+
+define <vscale x 4 x float> @intrinsic_vleff_v_nxv4f32_nxv4f32(<vscale x 4 x float>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; RV32-NEXT: vle32ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; RV64-NEXT: vle32ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 4 x float>, iXLen } @llvm.riscv.vleff.nxv4f32(
+ <vscale x 4 x float> undef,
+ <vscale x 4 x float>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 4 x float>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 4 x float>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 4 x float> %b
+}
+
+declare { <vscale x 4 x float>, iXLen } @llvm.riscv.vleff.mask.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x float> @intrinsic_vleff_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vle32ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vle32ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 4 x float>, iXLen } @llvm.riscv.vleff.mask.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 4 x float>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 4 x float>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 4 x float> %b
+}
+
+declare { <vscale x 8 x float>, iXLen } @llvm.riscv.vleff.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ iXLen);
+
+define <vscale x 8 x float> @intrinsic_vleff_v_nxv8f32_nxv8f32(<vscale x 8 x float>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV32-NEXT: vle32ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vle32ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 8 x float>, iXLen } @llvm.riscv.vleff.nxv8f32(
+ <vscale x 8 x float> undef,
+ <vscale x 8 x float>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 8 x float>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 8 x float>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 8 x float> %b
+}
+
+declare { <vscale x 8 x float>, iXLen } @llvm.riscv.vleff.mask.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x float> @intrinsic_vleff_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT: vle32ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT: vle32ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 8 x float>, iXLen } @llvm.riscv.vleff.mask.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 8 x float>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 8 x float>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 8 x float> %b
+}
+
+declare { <vscale x 16 x float>, iXLen } @llvm.riscv.vleff.nxv16f32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ iXLen);
+
+define <vscale x 16 x float> @intrinsic_vleff_v_nxv16f32_nxv16f32(<vscale x 16 x float>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vle32ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT: vle32ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 16 x float>, iXLen } @llvm.riscv.vleff.nxv16f32(
+ <vscale x 16 x float> undef,
+ <vscale x 16 x float>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 16 x float>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 16 x float>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 16 x float> %b
+}
+
+declare { <vscale x 16 x float>, iXLen } @llvm.riscv.vleff.mask.nxv16f32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x float> @intrinsic_vleff_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; RV32-NEXT: vle32ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; RV64-NEXT: vle32ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 16 x float>, iXLen } @llvm.riscv.vleff.mask.nxv16f32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 16 x float>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 16 x float>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 16 x float> %b
+}
+
+declare { <vscale x 1 x i16>, iXLen } @llvm.riscv.vleff.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vleff_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV32-NEXT: vle16ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV64-NEXT: vle16ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x i16>, iXLen } @llvm.riscv.vleff.nxv1i16(
+ <vscale x 1 x i16> undef,
+ <vscale x 1 x i16>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 1 x i16>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 1 x i16>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 1 x i16> %b
+}
+
+declare { <vscale x 1 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vleff_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; RV32-NEXT: vle16ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; RV64-NEXT: vle16ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 1 x i16>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 1 x i16>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 1 x i16> %b
+}
+
+declare { <vscale x 2 x i16>, iXLen } @llvm.riscv.vleff.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vleff_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; RV32-NEXT: vle16ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; RV64-NEXT: vle16ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 2 x i16>, iXLen } @llvm.riscv.vleff.nxv2i16(
+ <vscale x 2 x i16> undef,
+ <vscale x 2 x i16>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 2 x i16>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 2 x i16>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 2 x i16> %b
+}
+
+declare { <vscale x 2 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vleff_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; RV32-NEXT: vle16ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; RV64-NEXT: vle16ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 2 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 2 x i16>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 2 x i16>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 2 x i16> %b
+}
+
+declare { <vscale x 4 x i16>, iXLen } @llvm.riscv.vleff.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vleff_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; RV32-NEXT: vle16ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; RV64-NEXT: vle16ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 4 x i16>, iXLen } @llvm.riscv.vleff.nxv4i16(
+ <vscale x 4 x i16> undef,
+ <vscale x 4 x i16>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 4 x i16>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 4 x i16>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 4 x i16> %b
+}
+
+declare { <vscale x 4 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vleff_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT: vle16ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT: vle16ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 4 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 4 x i16>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 4 x i16>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 4 x i16> %b
+}
+
+declare { <vscale x 8 x i16>, iXLen } @llvm.riscv.vleff.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vleff_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vle16ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vle16ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 8 x i16>, iXLen } @llvm.riscv.vleff.nxv8i16(
+ <vscale x 8 x i16> undef,
+ <vscale x 8 x i16>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 8 x i16>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 8 x i16>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 8 x i16> %b
+}
+
+declare { <vscale x 8 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vleff_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT: vle16ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT: vle16ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 8 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 8 x i16>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 8 x i16>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 8 x i16> %b
+}
+
+declare { <vscale x 16 x i16>, iXLen } @llvm.riscv.vleff.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vleff_v_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; RV32-NEXT: vle16ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; RV64-NEXT: vle16ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 16 x i16>, iXLen } @llvm.riscv.vleff.nxv16i16(
+ <vscale x 16 x i16> undef,
+ <vscale x 16 x i16>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 16 x i16>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 16 x i16>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 16 x i16> %b
+}
+
+declare { <vscale x 16 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vleff_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; RV32-NEXT: vle16ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; RV64-NEXT: vle16ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 16 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 16 x i16>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 16 x i16>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 16 x i16> %b
+}
+
+declare { <vscale x 32 x i16>, iXLen } @llvm.riscv.vleff.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vleff_v_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; RV32-NEXT: vle16ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; RV64-NEXT: vle16ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 32 x i16>, iXLen } @llvm.riscv.vleff.nxv32i16(
+ <vscale x 32 x i16> undef,
+ <vscale x 32 x i16>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 32 x i16>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 32 x i16>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 32 x i16> %b
+}
+
+declare { <vscale x 32 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vleff_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; RV32-NEXT: vle16ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; RV64-NEXT: vle16ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 32 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 32 x i16>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 32 x i16>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 32 x i16> %b
+}
+
+declare { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vleff_v_nxv1half_nxv1f16(<vscale x 1 x half>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv1half_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV32-NEXT: vle16ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv1half_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV64-NEXT: vle16ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.nxv1f16(
+ <vscale x 1 x half> undef,
+ <vscale x 1 x half>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 1 x half>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 1 x half>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 1 x half> %b
+}
+
+declare { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.mask.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vleff_mask_v_nxv1half_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; RV32-NEXT: vle16ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; RV64-NEXT: vle16ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.mask.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 1 x half>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 1 x half>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 1 x half> %b
+}
+
+declare { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ iXLen);
+
+define <vscale x 2 x half> @intrinsic_vleff_v_nxv2half_nxv2f16(<vscale x 2 x half>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv2half_nxv2f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; RV32-NEXT: vle16ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv2half_nxv2f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; RV64-NEXT: vle16ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.nxv2f16(
+ <vscale x 2 x half> undef,
+ <vscale x 2 x half>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 2 x half>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 2 x half>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 2 x half> %b
+}
+
+declare { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.mask.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x half> @intrinsic_vleff_mask_v_nxv2half_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; RV32-NEXT: vle16ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; RV64-NEXT: vle16ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.mask.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 2 x half>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 2 x half>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 2 x half> %b
+}
+
+declare { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ iXLen);
+
+define <vscale x 4 x half> @intrinsic_vleff_v_nxv4half_nxv4f16(<vscale x 4 x half>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv4half_nxv4f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; RV32-NEXT: vle16ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv4half_nxv4f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; RV64-NEXT: vle16ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.nxv4f16(
+ <vscale x 4 x half> undef,
+ <vscale x 4 x half>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 4 x half>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 4 x half>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 4 x half> %b
+}
+
+declare { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.mask.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x half> @intrinsic_vleff_mask_v_nxv4half_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT: vle16ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT: vle16ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.mask.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 4 x half>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 4 x half>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 4 x half> %b
+}
+
+declare { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ iXLen);
+
+define <vscale x 8 x half> @intrinsic_vleff_v_nxv8half_nxv8f16(<vscale x 8 x half>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv8half_nxv8f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vle16ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv8half_nxv8f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vle16ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.nxv8f16(
+ <vscale x 8 x half> undef,
+ <vscale x 8 x half>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 8 x half>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 8 x half>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 8 x half> %b
+}
+
+declare { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.mask.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x half> @intrinsic_vleff_mask_v_nxv8half_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT: vle16ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT: vle16ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.mask.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 8 x half>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 8 x half>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 8 x half> %b
+}
+
+declare { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ iXLen);
+
+define <vscale x 16 x half> @intrinsic_vleff_v_nxv16half_nxv16f16(<vscale x 16 x half>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv16half_nxv16f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; RV32-NEXT: vle16ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv16half_nxv16f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; RV64-NEXT: vle16ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.nxv16f16(
+ <vscale x 16 x half> undef,
+ <vscale x 16 x half>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 16 x half>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 16 x half>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 16 x half> %b
+}
+
+declare { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.mask.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x half> @intrinsic_vleff_mask_v_nxv16half_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; RV32-NEXT: vle16ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; RV64-NEXT: vle16ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.mask.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 16 x half>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 16 x half>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 16 x half> %b
+}
+
+declare { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.nxv32f16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ iXLen);
+
+define <vscale x 32 x half> @intrinsic_vleff_v_nxv32half_nxv32f16(<vscale x 32 x half>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv32half_nxv32f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; RV32-NEXT: vle16ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv32half_nxv32f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; RV64-NEXT: vle16ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.nxv32f16(
+ <vscale x 32 x half> undef,
+ <vscale x 32 x half>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 32 x half>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 32 x half>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 32 x half> %b
+}
+
+declare { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.mask.nxv32f16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x half> @intrinsic_vleff_mask_v_nxv32half_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; RV32-NEXT: vle16ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; RV64-NEXT: vle16ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.mask.nxv32f16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 32 x half>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 32 x half>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 32 x half> %b
+}
+
+declare { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vleff_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; RV32-NEXT: vle8ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; RV64-NEXT: vle8ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 1 x i8>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 1 x i8>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 1 x i8> %b
+}
+
+declare { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vleff_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; RV32-NEXT: vle8ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; RV64-NEXT: vle8ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 1 x i8>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 1 x i8>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 1 x i8> %b
+}
+
+declare { <vscale x 2 x i8>, iXLen } @llvm.riscv.vleff.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vleff_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; RV32-NEXT: vle8ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; RV64-NEXT: vle8ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 2 x i8>, iXLen } @llvm.riscv.vleff.nxv2i8(
+ <vscale x 2 x i8> undef,
+ <vscale x 2 x i8>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 2 x i8>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 2 x i8>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 2 x i8> %b
+}
+
+declare { <vscale x 2 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vleff_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; RV32-NEXT: vle8ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; RV64-NEXT: vle8ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 2 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 2 x i8>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 2 x i8>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 2 x i8> %b
+}
+
+declare { <vscale x 4 x i8>, iXLen } @llvm.riscv.vleff.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vleff_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; RV32-NEXT: vle8ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; RV64-NEXT: vle8ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 4 x i8>, iXLen } @llvm.riscv.vleff.nxv4i8(
+ <vscale x 4 x i8> undef,
+ <vscale x 4 x i8>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 4 x i8>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 4 x i8>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 4 x i8> %b
+}
+
+declare { <vscale x 4 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vleff_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; RV32-NEXT: vle8ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; RV64-NEXT: vle8ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 4 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 4 x i8>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 4 x i8>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 4 x i8> %b
+}
+
+declare { <vscale x 8 x i8>, iXLen } @llvm.riscv.vleff.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vleff_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vle8ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vle8ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 8 x i8>, iXLen } @llvm.riscv.vleff.nxv8i8(
+ <vscale x 8 x i8> undef,
+ <vscale x 8 x i8>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 8 x i8>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 8 x i8>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 8 x i8> %b
+}
+
+declare { <vscale x 8 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vleff_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; RV32-NEXT: vle8ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; RV64-NEXT: vle8ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 8 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 8 x i8>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 8 x i8>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 8 x i8> %b
+}
+
+declare { <vscale x 16 x i8>, iXLen } @llvm.riscv.vleff.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vleff_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RV32-NEXT: vle8ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RV64-NEXT: vle8ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 16 x i8>, iXLen } @llvm.riscv.vleff.nxv16i8(
+ <vscale x 16 x i8> undef,
+ <vscale x 16 x i8>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 16 x i8>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 16 x i8>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 16 x i8> %b
+}
+
+declare { <vscale x 16 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vleff_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; RV32-NEXT: vle8ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; RV64-NEXT: vle8ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 16 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 16 x i8>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 16 x i8>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 16 x i8> %b
+}
+
+declare { <vscale x 32 x i8>, iXLen } @llvm.riscv.vleff.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vleff_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV32-NEXT: vle8ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV64-NEXT: vle8ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 32 x i8>, iXLen } @llvm.riscv.vleff.nxv32i8(
+ <vscale x 32 x i8> undef,
+ <vscale x 32 x i8>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 32 x i8>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 32 x i8>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 32 x i8> %b
+}
+
+declare { <vscale x 32 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vleff_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; RV32-NEXT: vle8ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; RV64-NEXT: vle8ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 32 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 32 x i8>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 32 x i8>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 32 x i8> %b
+}
+
+declare { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vleff_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV32-NEXT: vle8ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT: vle8ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.nxv64i8(
+ <vscale x 64 x i8> undef,
+ <vscale x 64 x i8>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 64 x i8>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 64 x i8>, iXLen } %a, 1
+ store iXLen %c, iXLen* %2
+ ret <vscale x 64 x i8> %b
+}
+
+declare { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vleff_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; RV32-NEXT: vle8ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; RV64-NEXT: vle8ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 64 x i8>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 64 x i8>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 64 x i8> %b
+}
+
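+; Note (general background, not autogenerated): vleff is a fault-only-first
+; load. A fault on any element after the first is not trapped; instead vl is
+; reduced to the number of elements actually loaded. The new vl therefore has
+; to be read back from the vl CSR, and a vleff whose results are unused
+; cannot simply be deleted.
+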
+; Test with the VL output unused. The csrr of vl and the store should not be
+; emitted.
+define <vscale x 1 x double> @intrinsic_vleff_dead_vl(<vscale x 1 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+; CHECK-LABEL: intrinsic_vleff_dead_vl:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
+ <vscale x 1 x double> undef,
+ <vscale x 1 x double>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0
+ ret <vscale x 1 x double> %b
+}
+
+define <vscale x 1 x double> @intrinsic_vleff_mask_dead_vl(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_dead_vl:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0
+
+ ret <vscale x 1 x double> %b
+}
+
+; Test with the loaded value unused. The vleff must still be emitted because
+; its vl result is used.
+define void @intrinsic_vleff_dead_value(<vscale x 1 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_dead_value:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV32-NEXT: vle64ff.v v8, (a0)
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_dead_value:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vle64ff.v v8, (a0)
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
+ <vscale x 1 x double> undef,
+ <vscale x 1 x double>* %0,
+ iXLen %1)
+ %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
+ store iXLen %b, iXLen* %2
+ ret void
+}
+
+define void @intrinsic_vleff_mask_dead_value(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_dead_value:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV32-NEXT: vle64ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_dead_value:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vle64ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
+ store iXLen %b, iXLen* %4
+
+ ret void
+}
+
+; Test with both outputs dead. Make sure the vleff isn't deleted.
+define void @intrinsic_vleff_dead_all(<vscale x 1 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+; CHECK-LABEL: intrinsic_vleff_dead_all:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
+ <vscale x 1 x double> undef,
+ <vscale x 1 x double>* %0,
+ iXLen %1)
+ ret void
+}
+
+define void @intrinsic_vleff_mask_dead_all(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_dead_all:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret void
+}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i32,
- i32
-);
-
-define <vscale x 1 x i8> @intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
-; CHECK-NEXT: vmacc.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- i32 %3, i32 0)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32, i32);
-
-define <vscale x 1 x i8> @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
-; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i32,
- i32
-);
-
-define <vscale x 2 x i8> @intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
-; CHECK-NEXT: vmacc.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- i32 %3, i32 0)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32, i32);
-
-define <vscale x 2 x i8> @intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
-; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i32,
- i32
-);
-
-define <vscale x 4 x i8> @intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
-; CHECK-NEXT: vmacc.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- i32 %3, i32 0)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32, i32);
-
-define <vscale x 4 x i8> @intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
-; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i32,
- i32
-);
-
-define <vscale x 8 x i8> @intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
-; CHECK-NEXT: vmacc.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- i32 %3, i32 0)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32, i32);
-
-define <vscale x 8 x i8> @intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
-; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i32,
- i32
-);
-
-define <vscale x 16 x i8> @intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
-; CHECK-NEXT: vmacc.vv v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- i32 %3, i32 0)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32, i32);
-
-define <vscale x 16 x i8> @intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
-; CHECK-NEXT: vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i32,
- i32
-);
-
-define <vscale x 32 x i8> @intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
-; CHECK-NEXT: vmacc.vv v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- i32 %3, i32 0)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i32, i32);
-
-define <vscale x 32 x i8> @intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
-; CHECK-NEXT: vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i32,
- i32
-);
-
-define <vscale x 1 x i16> @intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
-; CHECK-NEXT: vmacc.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- i32 %3, i32 0)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32, i32);
-
-define <vscale x 1 x i16> @intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i32,
- i32
-);
-
-define <vscale x 2 x i16> @intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
-; CHECK-NEXT: vmacc.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- i32 %3, i32 0)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32, i32);
-
-define <vscale x 2 x i16> @intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i32,
- i32
-);
-
-define <vscale x 4 x i16> @intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
-; CHECK-NEXT: vmacc.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- i32 %3, i32 0)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32, i32);
-
-define <vscale x 4 x i16> @intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i32,
- i32
-);
-
-define <vscale x 8 x i16> @intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
-; CHECK-NEXT: vmacc.vv v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- i32 %3, i32 0)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32, i32);
-
-define <vscale x 8 x i16> @intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT: vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i32,
- i32
-);
-
-define <vscale x 16 x i16> @intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
-; CHECK-NEXT: vmacc.vv v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- i32 %3, i32 0)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32, i32);
-
-define <vscale x 16 x i16> @intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT: vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- i32
-);
-
-define <vscale x 1 x i32> @intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
-; CHECK-NEXT: vmacc.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- i32 %3, i32 0)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32, i32);
-
-define <vscale x 1 x i32> @intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- i32
-);
-
-define <vscale x 2 x i32> @intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
-; CHECK-NEXT: vmacc.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- i32 %3, i32 0)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32, i32);
-
-define <vscale x 2 x i32> @intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- i32
-);
-
-define <vscale x 4 x i32> @intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
-; CHECK-NEXT: vmacc.vv v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- i32 %3, i32 0)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32, i32);
-
-define <vscale x 4 x i32> @intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT: vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- i32
-);
-
-define <vscale x 8 x i32> @intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
-; CHECK-NEXT: vmacc.vv v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- i32 %3, i32 0)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32, i32);
-
-define <vscale x 8 x i32> @intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT: vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i32,
- i32
-);
-
-define <vscale x 1 x i64> @intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
-; CHECK-NEXT: vmacc.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- i32 %3, i32 0)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i32, i32);
-
-define <vscale x 1 x i64> @intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i32,
- i32
-);
-
-define <vscale x 2 x i64> @intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
-; CHECK-NEXT: vmacc.vv v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- i32 %3, i32 0)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i32, i32);
-
-define <vscale x 2 x i64> @intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT: vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i32,
- i32
-);
-
-define <vscale x 4 x i64> @intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
-; CHECK-NEXT: vmacc.vv v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- i32 %3, i32 0)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i32, i32);
-
-define <vscale x 4 x i64> @intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT: vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8(
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i8>,
- i32,
- i32
-);
-
-define <vscale x 1 x i8> @intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
-; CHECK-NEXT: vmacc.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 %1,
- <vscale x 1 x i8> %2,
- i32 %3, i32 0)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8(
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32, i32);
-
-define <vscale x 1 x i8> @intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
-; CHECK-NEXT: vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8(
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i8>,
- i32,
- i32
-);
-
-define <vscale x 2 x i8> @intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
-; CHECK-NEXT: vmacc.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 %1,
- <vscale x 2 x i8> %2,
- i32 %3, i32 0)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8(
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32, i32);
-
-define <vscale x 2 x i8> @intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
-; CHECK-NEXT: vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8(
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i8>,
- i32,
- i32
-);
-
-define <vscale x 4 x i8> @intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
-; CHECK-NEXT: vmacc.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 %1,
- <vscale x 4 x i8> %2,
- i32 %3, i32 0)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8(
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32, i32);
-
-define <vscale x 4 x i8> @intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
-; CHECK-NEXT: vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8(
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i8>,
- i32,
- i32
-);
-
-define <vscale x 8 x i8> @intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma
-; CHECK-NEXT: vmacc.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 %1,
- <vscale x 8 x i8> %2,
- i32 %3, i32 0)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8(
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32, i32);
-
-define <vscale x 8 x i8> @intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
-; CHECK-NEXT: vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8(
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i8>,
- i32,
- i32
-);
-
-define <vscale x 16 x i8> @intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma
-; CHECK-NEXT: vmacc.vx v8, a0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 %1,
- <vscale x 16 x i8> %2,
- i32 %3, i32 0)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8(
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32, i32);
-
-define <vscale x 16 x i8> @intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu
-; CHECK-NEXT: vmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8(
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i8>,
- i32,
- i32
-);
-
-define <vscale x 32 x i8> @intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma
-; CHECK-NEXT: vmacc.vx v8, a0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 %1,
- <vscale x 32 x i8> %2,
- i32 %3, i32 0)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8(
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i32, i32);
-
-define <vscale x 32 x i8> @intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu
-; CHECK-NEXT: vmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16(
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i16>,
- i32,
- i32
-);
-
-define <vscale x 1 x i16> @intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
-; CHECK-NEXT: vmacc.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 %1,
- <vscale x 1 x i16> %2,
- i32 %3, i32 0)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16(
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32, i32);
-
-define <vscale x 1 x i16> @intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT: vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16(
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i16>,
- i32,
- i32
-);
-
-define <vscale x 2 x i16> @intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma
-; CHECK-NEXT: vmacc.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 %1,
- <vscale x 2 x i16> %2,
- i32 %3, i32 0)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16(
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32, i32);
-
-define <vscale x 2 x i16> @intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT: vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16(
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i16>,
- i32,
- i32
-);
-
-define <vscale x 4 x i16> @intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
-; CHECK-NEXT: vmacc.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 %1,
- <vscale x 4 x i16> %2,
- i32 %3, i32 0)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16(
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32, i32);
-
-define <vscale x 4 x i16> @intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT: vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16(
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i16>,
- i32,
- i32
-);
-
-define <vscale x 8 x i16> @intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma
-; CHECK-NEXT: vmacc.vx v8, a0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 %1,
- <vscale x 8 x i16> %2,
- i32 %3, i32 0)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16(
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32, i32);
-
-define <vscale x 8 x i16> @intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT: vmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16(
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i16>,
- i32,
- i32
-);
-
-define <vscale x 16 x i16> @intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma
-; CHECK-NEXT: vmacc.vx v8, a0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 %1,
- <vscale x 16 x i16> %2,
- i32 %3, i32 0)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16(
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32, i32);
-
-define <vscale x 16 x i16> @intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
-; CHECK-NEXT: vmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32(
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i32>,
- i32,
- i32
-);
-
-define <vscale x 1 x i32> @intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
-; CHECK-NEXT: vmacc.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 %1,
- <vscale x 1 x i32> %2,
- i32 %3, i32 0)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32(
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32, i32);
-
-define <vscale x 1 x i32> @intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT: vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32(
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i32>,
- i32,
- i32
-);
-
-define <vscale x 2 x i32> @intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
-; CHECK-NEXT: vmacc.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 %1,
- <vscale x 2 x i32> %2,
- i32 %3, i32 0)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32(
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32, i32);
-
-define <vscale x 2 x i32> @intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT: vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32(
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i32>,
- i32,
- i32
-);
-
-define <vscale x 4 x i32> @intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
-; CHECK-NEXT: vmacc.vx v8, a0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 %1,
- <vscale x 4 x i32> %2,
- i32 %3, i32 0)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32(
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32, i32);
-
-define <vscale x 4 x i32> @intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT: vmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32(
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i32>,
- i32,
- i32
-);
-
-define <vscale x 8 x i32> @intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma
-; CHECK-NEXT: vmacc.vx v8, a0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 %1,
- <vscale x 8 x i32> %2,
- i32 %3, i32 0)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32(
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32, i32);
-
-define <vscale x 8 x i32> @intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT: vmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64(
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i64>,
- i32,
- i32
-);
-
-define <vscale x 1 x i64> @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, ma
-; CHECK-NEXT: vmacc.vv v8, v10, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 %1,
- <vscale x 1 x i64> %2,
- i32 %3, i32 0)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64(
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i32, i32);
-
-define <vscale x 1 x i64> @intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu
-; CHECK-NEXT: vmacc.vv v8, v10, v9, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64(
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i64>,
- i32,
- i32
-);
-
-define <vscale x 2 x i64> @intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, ma
-; CHECK-NEXT: vmacc.vv v8, v12, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 %1,
- <vscale x 2 x i64> %2,
- i32 %3, i32 0)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64(
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i32, i32);
-
-define <vscale x 2 x i64> @intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu
-; CHECK-NEXT: vmacc.vv v8, v12, v10, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64(
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i64>,
- i32,
- i32
-);
-
-define <vscale x 4 x i64> @intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, ma
-; CHECK-NEXT: vmacc.vv v8, v16, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 %1,
- <vscale x 4 x i64> %2,
- i32 %3, i32 0)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64(
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i32, i32);
-
-define <vscale x 4 x i64> @intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu
-; CHECK-NEXT: vmacc.vv v8, v16, v12, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 0)
-
- ret <vscale x 4 x i64> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
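+;
+; Explanatory note (not autogenerated): the test body below is written once
+; against the placeholder type iXLen, and the sed invocations above rewrite
+; it to the target's XLEN type (i32 on riscv32, i64 on riscv64) before llc
+; runs. For example, under 's/iXLen/i32/g' the first declaration becomes:
+;
+;   declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
+;     <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32, i32);
+;
+; The two trailing iXLen operands carry the VL and the policy; policy 0
+; keeps the tail undisturbed, matching the "tu" in the CHECK lines.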
declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 1 x i8> @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i8> %a
}
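+;
+; Explanatory note (not autogenerated): the .mask variants carry an extra
+; <vscale x N x i1> operand. RVV masking is hard-wired to register v0, so
+; the mask arrives there and the instruction gains the v0.t suffix;
+; masked-off elements follow the "mu" (mask undisturbed) policy shown in
+; the vsetvli above.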
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 2 x i8> @intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 4 x i8> @intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 8 x i8> @intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 16 x i8> @intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 32 x i8> @intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 32 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 1 x i16> @intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 2 x i16> @intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 4 x i16> @intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 8 x i16> @intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 16 x i16> @intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 16 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 1 x i32> @intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 2 x i32> @intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 4 x i32> @intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 8 x i32> @intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 1 x i64> @intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 2 x i64> @intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 4 x i64> @intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i64> %a
}
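; From here the tests switch from the .vv (vector-vector) form of vmacc to
; the .vx (vector-scalar) form. Scalars no wider than XLEN fit in a single
; GPR on both targets, so these bodies keep a shared CHECK prefix and still
; emit vmacc.vx; only the i64-scalar tests later in the file diverge into
; separate RV32 and RV64 check blocks.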
<vscale x 1 x i8>,
i8,
<vscale x 1 x i8>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
<vscale x 1 x i8> %0,
i8 %1,
<vscale x 1 x i8> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i8> %a
}
i8,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 1 x i8> @intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
i8 %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i8>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
<vscale x 2 x i8> %0,
i8 %1,
<vscale x 2 x i8> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i8> %a
}
i8,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 2 x i8> @intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
i8 %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i8>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
<vscale x 4 x i8> %0,
i8 %1,
<vscale x 4 x i8> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i8> %a
}
i8,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 4 x i8> @intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
i8 %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i8>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma
<vscale x 8 x i8> %0,
i8 %1,
<vscale x 8 x i8> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i8> %a
}
i8,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 8 x i8> @intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
i8 %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i8>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma
<vscale x 16 x i8> %0,
i8 %1,
<vscale x 16 x i8> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 16 x i8> %a
}
i8,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 16 x i8> @intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu
i8 %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i8>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma
<vscale x 32 x i8> %0,
i8 %1,
<vscale x 32 x i8> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 32 x i8> %a
}
i8,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 32 x i8> @intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu
i8 %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 32 x i8> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i16>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
<vscale x 1 x i16> %0,
i16 %1,
<vscale x 1 x i16> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i16> %a
}
i16,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 1 x i16> @intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
i16 %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i16>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma
<vscale x 2 x i16> %0,
i16 %1,
<vscale x 2 x i16> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i16> %a
}
i16,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 2 x i16> @intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
i16 %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i16>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
<vscale x 4 x i16> %0,
i16 %1,
<vscale x 4 x i16> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i16> %a
}
i16,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 4 x i16> @intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
i16 %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i16>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma
<vscale x 8 x i16> %0,
i16 %1,
<vscale x 8 x i16> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i16> %a
}
i16,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 8 x i16> @intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
i16 %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i16>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma
<vscale x 16 x i16> %0,
i16 %1,
<vscale x 16 x i16> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 16 x i16> %a
}
i16,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 16 x i16> @intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
i16 %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 16 x i16> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i32>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
<vscale x 1 x i32> %0,
i32 %1,
<vscale x 1 x i32> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i32> %a
}
i32,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 1 x i32> @intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
i32 %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i32>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
<vscale x 2 x i32> %0,
i32 %1,
<vscale x 2 x i32> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i32> %a
}
i32,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 2 x i32> @intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
i32 %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i32>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
<vscale x 4 x i32> %0,
i32 %1,
<vscale x 4 x i32> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i32> %a
}
i32,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 4 x i32> @intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
i32 %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i32>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma
<vscale x 8 x i32> %0,
i32 %1,
<vscale x 8 x i32> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i32> %a
}
i32,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i64, i64);
+ iXLen, iXLen);
-define <vscale x 8 x i32> @intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
i32 %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i32> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, ma
-; CHECK-NEXT: vmacc.vx v8, a0, v9
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma
+; RV32-NEXT: vmacc.vv v8, v10, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT: vmacc.vx v8, a0, v9
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i64> %a
}
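; The RV32 block above shows why the i64-scalar tests need split check
; lines: RV32 has no 64-bit GPRs, so the i64 operand arrives split across
; a0/a1. The lowering spills both halves to a 16-byte stack slot, splats the
; 64-bit value with a zero-strided vlse64.v (every element reloads the same
; address), and falls back from vmacc.vx to vmacc.vv. Note the two vsetvli
; instructions: the splat runs tail-agnostic (ta), and only the vmacc itself
; switches to tail-undisturbed (tu) so the destination's tail is preserved.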
i64,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i64> @intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT: vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
+ iXLen, iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu
+; RV32-NEXT: vmacc.vv v8, v10, v9, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; RV64-NEXT: vmacc.vx v8, a0, v9, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, ma
-; CHECK-NEXT: vmacc.vx v8, a0, v10
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, ma
+; RV32-NEXT: vmacc.vv v8, v12, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT: vmacc.vx v8, a0, v10
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i64> %a
}
i64,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i64> @intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT: vmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT: ret
+ iXLen, iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, mu
+; RV32-NEXT: vmacc.vv v8, v12, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu
+; RV64-NEXT: vmacc.vx v8, a0, v10, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, ma
-; CHECK-NEXT: vmacc.vx v8, a0, v12
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, ma
+; RV32-NEXT: vmacc.vv v8, v16, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma
+; RV64-NEXT: vmacc.vx v8, a0, v12
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
- i64 %3, i64 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i64> %a
}
i64,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i64> @intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT: vmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT: ret
+ iXLen, iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu
+; RV32-NEXT: vmacc.vv v8, v16, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu
+; RV64-NEXT: vmacc.vx v8, a0, v12, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i64> %a
}
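; The hunk that follows deletes the old riscv32-only copy of the vmadc
; tests, whose hard-coded i32 VL operands are exactly what the iXLen scheme
; above makes redundant. The surviving coverage presumably lives in a merged
; vmadc file using the same sed/iXLen RUN-line pattern; that file is not
; part of this hunk.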
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmadc.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmadc.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmadc.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmadc.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i32 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmadc.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i32 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i32);
-
-define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmadc.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i32 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i32);
-
-define <vscale x 64 x i1> @intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vmadc.vv v0, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i32 %2)
-
- ret <vscale x 64 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmadc.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmadc.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmadc.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmadc.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i32 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmadc.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i32 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i32);
-
-define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vmadc.vv v0, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i32 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmadc.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmadc.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmadc.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmadc.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vmadc.vv v0, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmadc.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmadc.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmadc.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmadc.vv v0, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i32 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
- <vscale x 1 x i8>,
- i8,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vmadc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
- <vscale x 2 x i8>,
- i8,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vmadc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
- <vscale x 4 x i8>,
- i8,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vmadc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
- <vscale x 8 x i8>,
- i8,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vmadc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
- <vscale x 16 x i8>,
- i8,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vmadc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
- <vscale x 32 x i8>,
- i8,
- i32);
-
-define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vmadc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
- <vscale x 64 x i8>,
- i8,
- i32);
-
-define <vscale x 64 x i1> @intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vmadc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 64 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
- <vscale x 1 x i16>,
- i16,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vmadc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
- <vscale x 2 x i16>,
- i16,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vmadc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
- <vscale x 4 x i16>,
- i16,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vmadc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
- <vscale x 8 x i16>,
- i16,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vmadc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
- <vscale x 16 x i16>,
- i16,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vmadc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
- <vscale x 32 x i16>,
- i16,
- i32);
-
-define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vmadc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
- <vscale x 1 x i32>,
- i32,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vmadc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
- <vscale x 2 x i32>,
- i32,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vmadc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
- <vscale x 4 x i32>,
- i32,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vmadc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
- <vscale x 8 x i32>,
- i32,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vmadc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
- <vscale x 16 x i32>,
- i32,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vmadc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
- <vscale x 1 x i64>,
- i64,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vmadc.vv v0, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
- <vscale x 2 x i64>,
- i64,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vmadc.vv v0, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
- <vscale x 4 x i64>,
- i64,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmadc.vv v0, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
- <vscale x 8 x i64>,
- i64,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vmadc.vv v0, v8, v16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmadc.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmadc.vi v0, v8, -9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 -9,
- i32 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmadc.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmadc.vi v0, v8, -9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 -9,
- i32 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmadc.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmadc.vi v0, v8, -9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 -9,
- i32 %1)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 64 x i1> @intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vmadc.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 64 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmadc.vi v0, v8, -9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 -9,
- i32 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmadc.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmadc.vi v0, v8, -9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 -9,
- i32 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmadc.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmadc.vi v0, v8, -9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 -9,
- i32 %1)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vmadc.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmadc.vi v0, v8, -9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 -9,
- i32 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmadc.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmadc.vi v0, v8, -9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 -9,
- i32 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmadc.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vmadc.vi v0, v8, -9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- i32 -9,
- i32 %1)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmadc.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 9,
- i32 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmadc.vi v0, v8, -9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 -9,
- i32 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmadc.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 9,
- i32 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmadc.vi v0, v8, -9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- i64 -9,
- i32 %1)
-
- ret <vscale x 8 x i1> %a
-}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmadc.vvm v12, v8, v10, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmadc.vvm v16, v8, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i1> %2,
- i64 %3)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i64);
-
-define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vmadc.vvm v24, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i1> %2,
- i64 %3)
-
- ret <vscale x 64 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmadc.vvm v12, v8, v10, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmadc.vvm v16, v8, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vmadc.vvm v24, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i1> %2,
- i64 %3)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmadc.vvm v12, v8, v10, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmadc.vvm v16, v8, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vmadc.vvm v24, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmadc.vvm v12, v8, v10, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmadc.vvm v16, v8, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmadc.vvm v24, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0
-; CHECK-NEXT: vmv.v.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vmadc.vxm v10, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 %1,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vmadc.vxm v12, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 %1,
- <vscale x 32 x i1> %2,
- i64 %3)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
- <vscale x 64 x i8>,
- i8,
- <vscale x 64 x i1>,
- i64);
-
-define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vmadc.vxm v16, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- i8 %1,
- <vscale x 64 x i1> %2,
- i64 %3)
-
- ret <vscale x 64 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0
-; CHECK-NEXT: vmv.v.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vmadc.vxm v10, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vmadc.vxm v12, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 %1,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
- <vscale x 32 x i16>,
- i16,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vmadc.vxm v16, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- i16 %1,
- <vscale x 32 x i1> %2,
- i64 %3)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0
-; CHECK-NEXT: vmv.v.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vmadc.vxm v10, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vmadc.vxm v12, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
- <vscale x 16 x i32>,
- i32,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vmadc.vxm v16, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- i32 %1,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0
-; CHECK-NEXT: vmv.v.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vmadc.vxm v10, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vmadc.vxm v12, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
- <vscale x 8 x i64>,
- i64,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vmadc.vxm v16, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- i64 %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmadc.vim v9, v8, 9, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 9,
- <vscale x 1 x i1> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmadc.vim v9, v8, 9, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 9,
- <vscale x 2 x i1> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmadc.vim v9, v8, 9, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 9,
- <vscale x 4 x i1> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmadc.vim v9, v8, 9, v0
-; CHECK-NEXT: vmv.v.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 9,
- <vscale x 8 x i1> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmadc.vim v10, v8, 9, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 9,
- <vscale x 16 x i1> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmadc.vim v12, v8, 9, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 9,
- <vscale x 32 x i1> %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vmadc.vim v16, v8, 9, v0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- i8 9,
- <vscale x 64 x i1> %1,
- i64 %2)
-
- ret <vscale x 64 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmadc.vim v9, v8, 9, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 9,
- <vscale x 1 x i1> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmadc.vim v9, v8, 9, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 9,
- <vscale x 2 x i1> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmadc.vim v9, v8, 9, v0
-; CHECK-NEXT: vmv.v.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 9,
- <vscale x 4 x i1> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmadc.vim v10, v8, 9, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 9,
- <vscale x 8 x i1> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmadc.vim v12, v8, 9, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 9,
- <vscale x 16 x i1> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vmadc.vim v16, v8, 9, v0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- i16 9,
- <vscale x 32 x i1> %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmadc.vim v9, v8, 9, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 9,
- <vscale x 1 x i1> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmadc.vim v9, v8, 9, v0
-; CHECK-NEXT: vmv.v.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 9,
- <vscale x 2 x i1> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmadc.vim v10, v8, 9, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 9,
- <vscale x 4 x i1> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmadc.vim v12, v8, 9, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 9,
- <vscale x 8 x i1> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vmadc.vim v16, v8, 9, v0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- i32 9,
- <vscale x 16 x i1> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmadc.vim v9, v8, 9, v0
-; CHECK-NEXT: vmv.v.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 9,
- <vscale x 1 x i1> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmadc.vim v10, v8, 9, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 9,
- <vscale x 2 x i1> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmadc.vim v12, v8, 9, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 9,
- <vscale x 4 x i1> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmadc.vim v16, v8, 9, v0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- i64 9,
- <vscale x 8 x i1> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
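+; The sed pipelines above specialize the iXLen placeholder used for the VL
+; operand to i32 on riscv32 and i64 on riscv64, so a single test body drives
+; both targets; output that differs between the two is checked under the
+; RV32 and RV64 prefixes, with CHECK covering the common lines.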
declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 32 x i1> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i1>,
- i32);
+ iXLen);
-define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
<vscale x 64 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 64 x i1> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
<vscale x 32 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 32 x i1> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
<vscale x 1 x i8> %0,
i8 %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
<vscale x 2 x i8> %0,
i8 %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
<vscale x 4 x i8> %0,
i8 %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
<vscale x 8 x i8> %0,
i8 %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
<vscale x 16 x i8> %0,
i8 %1,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
<vscale x 32 x i8> %0,
i8 %1,
<vscale x 32 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 32 x i1> %a
}
<vscale x 64 x i8>,
i8,
<vscale x 64 x i1>,
- i32);
+ iXLen);
-define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
<vscale x 64 x i8> %0,
i8 %1,
<vscale x 64 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 64 x i1> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
<vscale x 1 x i16> %0,
i16 %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
<vscale x 2 x i16> %0,
i16 %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
<vscale x 4 x i16> %0,
i16 %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
<vscale x 8 x i16> %0,
i16 %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
<vscale x 16 x i16> %0,
i16 %1,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
<vscale x 32 x i16>,
i16,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
<vscale x 32 x i16> %0,
i16 %1,
<vscale x 32 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 32 x i1> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
<vscale x 1 x i32> %0,
i32 %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
<vscale x 2 x i32> %0,
i32 %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
<vscale x 4 x i32> %0,
i32 %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
<vscale x 8 x i32> %0,
i32 %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
<vscale x 16 x i32>,
i32,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
<vscale x 16 x i32> %0,
i32 %1,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vmadc.vvm v9, v8, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
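+; For i64 scalar operands the RV32 and RV64 checks diverge: on riscv32 the
+; scalar arrives split across a register pair, so it is spilled to the stack
+; and broadcast with a zero-stride vlse64.v, turning the vxm form into
+; vmadc.vvm; on riscv64 the scalar fits in a single GPR and vmadc.vxm is
+; emitted directly.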
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vmadc.vvm v9, v8, v10, v0
+; RV32-NEXT: vmv.v.v v0, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vmadc.vxm v9, v8, a0, v0
+; RV64-NEXT: vmv.v.v v0, v9
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmadc.vvm v10, v8, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmadc.vvm v10, v8, v12, v0
+; RV32-NEXT: vmv1r.v v0, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vmadc.vxm v10, v8, a0, v0
+; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vmadc.vvm v12, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vmadc.vvm v12, v8, v16, v0
+; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vmadc.vxm v12, v8, a0, v0
+; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vmadc.vvm v16, v8, v24, v0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a0), zero
+; RV32-NEXT: vmadc.vvm v16, v8, v24, v0
+; RV32-NEXT: vmv1r.v v0, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vmadc.vxm v16, v8, a0, v0
+; RV64-NEXT: vmv1r.v v0, v16
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
<vscale x 1 x i8> %0,
i8 9,
<vscale x 1 x i1> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
<vscale x 2 x i8> %0,
i8 9,
<vscale x 2 x i1> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
<vscale x 4 x i8> %0,
i8 9,
<vscale x 4 x i1> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
<vscale x 8 x i8> %0,
i8 9,
<vscale x 8 x i1> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
<vscale x 16 x i8> %0,
i8 9,
<vscale x 16 x i1> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
<vscale x 32 x i8> %0,
i8 9,
<vscale x 32 x i1> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
-define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
<vscale x 64 x i8> %0,
i8 9,
<vscale x 64 x i1> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 64 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
<vscale x 1 x i16> %0,
i16 9,
<vscale x 1 x i1> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
<vscale x 2 x i16> %0,
i16 9,
<vscale x 2 x i1> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
<vscale x 4 x i16> %0,
i16 9,
<vscale x 4 x i1> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
<vscale x 8 x i16> %0,
i16 9,
<vscale x 8 x i1> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
<vscale x 16 x i16> %0,
i16 9,
<vscale x 16 x i1> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
<vscale x 32 x i16> %0,
i16 9,
<vscale x 32 x i1> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
<vscale x 1 x i32> %0,
i32 9,
<vscale x 1 x i1> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
<vscale x 2 x i32> %0,
i32 9,
<vscale x 2 x i1> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
<vscale x 4 x i32> %0,
i32 9,
<vscale x 4 x i1> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
<vscale x 8 x i32> %0,
i32 9,
<vscale x 8 x i1> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
<vscale x 16 x i32> %0,
i32 9,
<vscale x 16 x i1> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
<vscale x 1 x i64> %0,
i64 9,
<vscale x 1 x i1> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
<vscale x 2 x i64> %0,
i64 9,
<vscale x 2 x i1> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
<vscale x 4 x i64> %0,
i64 9,
<vscale x 4 x i1> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
<vscale x 8 x i64> %0,
i64 9,
<vscale x 8 x i1> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
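+; As in the carry.in test above, sed rewrites iXLen per target so one test
+; body serves both riscv32 and riscv64.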
declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8(
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8(
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i64);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8(
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i64);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8(
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8(
<vscale x 64 x i8>,
<vscale x 64 x i8>,
- i64);
+ iXLen);
-define <vscale x 64 x i1> @intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
%a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8(
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 64 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i64);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
- i64);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16(
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
- i64);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32(
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmadc.vv v0, v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmadc.vv v0, v8, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmadc.vv v0, v8, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmadc.vv v0, v8, v16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmadc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmadc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmadc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmadc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmadc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmadc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
<vscale x 64 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 64 x i1> @intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmadc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
<vscale x 64 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 64 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vmadc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vmadc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vmadc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vmadc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vmadc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
<vscale x 32 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vmadc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
<vscale x 32 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vmadc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vmadc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vmadc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vmadc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
<vscale x 16 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmadc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
<vscale x 16 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vmadc.vx v0, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vmadc.vv v0, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vmadc.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vmadc.vx v0, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vmadc.vv v0, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vmadc.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vmadc.vx v0, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmadc.vv v0, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vmadc.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vmadc.vx v0, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vmadc.vv v0, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vmadc.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmadc.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmadc.vi v0, v8, -9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 -9,
- i64 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmadc.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmadc.vi v0, v8, -9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 -9,
- i64 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vmadc.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 16 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vmadc.vi v0, v8, -9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 -9,
- i64 %1)
+ iXLen %1)
ret <vscale x 32 x i1> %a
}
-define <vscale x 64 x i1> @intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmadc.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
<vscale x 64 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 64 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmadc.vi v0, v8, -9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 -9,
- i64 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vmadc.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmadc.vi v0, v8, -9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 -9,
- i64 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmadc.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmadc.vi v0, v8, -9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 -9,
- i64 %1)
+ iXLen %1)
ret <vscale x 16 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vmadc.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
<vscale x 32 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 32 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmadc.vi v0, v8, -9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 -9,
- i64 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmadc.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmadc.vi v0, v8, -9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 -9,
- i64 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmadc.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmadc.vi v0, v8, -9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
<vscale x 16 x i32> %0,
i32 -9,
- i64 %1)
+ iXLen %1)
ret <vscale x 16 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmadc.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmadc.vi v0, v8, -9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 -9,
- i64 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmadc.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmadc.vi v0, v8, -9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 -9,
- i64 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
-; CHECK-NEXT: vmadd.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i8> @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
-; CHECK-NEXT: vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
-; CHECK-NEXT: vmadd.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i8> @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
-; CHECK-NEXT: vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
-; CHECK-NEXT: vmadd.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i8> @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
-; CHECK-NEXT: vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
-; CHECK-NEXT: vmadd.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64, i64);
-
-define <vscale x 8 x i8> @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
-; CHECK-NEXT: vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
-; CHECK-NEXT: vmadd.vv v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64, i64);
-
-define <vscale x 16 x i8> @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
-; CHECK-NEXT: vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
-; CHECK-NEXT: vmadd.vv v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64, i64);
-
-define <vscale x 32 x i8> @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
-; CHECK-NEXT: vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
-; CHECK-NEXT: vmadd.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i16> @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT: vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
-; CHECK-NEXT: vmadd.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i16> @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT: vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
-; CHECK-NEXT: vmadd.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i16> @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT: vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
-; CHECK-NEXT: vmadd.vv v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64, i64);
-
-define <vscale x 8 x i16> @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT: vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
-; CHECK-NEXT: vmadd.vv v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64, i64);
-
-define <vscale x 16 x i16> @intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT: vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
-; CHECK-NEXT: vmadd.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i32> @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT: vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
-; CHECK-NEXT: vmadd.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i32> @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT: vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
-; CHECK-NEXT: vmadd.vv v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i32> @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT: vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
-; CHECK-NEXT: vmadd.vv v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64, i64);
-
-define <vscale x 8 x i32> @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT: vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
-; CHECK-NEXT: vmadd.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i64> @intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT: vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
-; CHECK-NEXT: vmadd.vv v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i64> @intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT: vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
-; CHECK-NEXT: vmadd.vv v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i64> @intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT: vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8(
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i8>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
-; CHECK-NEXT: vmadd.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 %1,
- <vscale x 1 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8(
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i8> @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
-; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8(
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i8>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
-; CHECK-NEXT: vmadd.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 %1,
- <vscale x 2 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8(
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i8> @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
-; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8(
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i8>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
-; CHECK-NEXT: vmadd.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 %1,
- <vscale x 4 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8(
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i8> @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
-; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8(
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i8>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma
-; CHECK-NEXT: vmadd.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 %1,
- <vscale x 8 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8(
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64, i64);
-
-define <vscale x 8 x i8> @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
-; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8(
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i8>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma
-; CHECK-NEXT: vmadd.vx v8, a0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 %1,
- <vscale x 16 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8(
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64, i64);
-
-define <vscale x 16 x i8> @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu
-; CHECK-NEXT: vmadd.vx v8, a0, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8(
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i8>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma
-; CHECK-NEXT: vmadd.vx v8, a0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 %1,
- <vscale x 32 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8(
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64, i64);
-
-define <vscale x 32 x i8> @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu
-; CHECK-NEXT: vmadd.vx v8, a0, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16(
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i16>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
-; CHECK-NEXT: vmadd.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 %1,
- <vscale x 1 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16(
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i16> @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16(
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i16>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma
-; CHECK-NEXT: vmadd.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 %1,
- <vscale x 2 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16(
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i16> @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16(
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i16>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
-; CHECK-NEXT: vmadd.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 %1,
- <vscale x 4 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16(
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i16> @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16(
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i16>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma
-; CHECK-NEXT: vmadd.vx v8, a0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 %1,
- <vscale x 8 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16(
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64, i64);
-
-define <vscale x 8 x i16> @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT: vmadd.vx v8, a0, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16(
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i16>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma
-; CHECK-NEXT: vmadd.vx v8, a0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 %1,
- <vscale x 16 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16(
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64, i64);
-
-define <vscale x 16 x i16> @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
-; CHECK-NEXT: vmadd.vx v8, a0, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32(
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i32>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
-; CHECK-NEXT: vmadd.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 %1,
- <vscale x 1 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32(
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i32> @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32(
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i32>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
-; CHECK-NEXT: vmadd.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 %1,
- <vscale x 2 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32(
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i32> @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32(
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i32>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
-; CHECK-NEXT: vmadd.vx v8, a0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 %1,
- <vscale x 4 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32(
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i32> @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT: vmadd.vx v8, a0, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32(
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i32>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma
-; CHECK-NEXT: vmadd.vx v8, a0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 %1,
- <vscale x 8 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32(
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64, i64);
-
-define <vscale x 8 x i32> @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT: vmadd.vx v8, a0, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, ma
-; CHECK-NEXT: vmadd.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 %1,
- <vscale x 1 x i64> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64(
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i64> @intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64(
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, ma
-; CHECK-NEXT: vmadd.vx v8, a0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 %1,
- <vscale x 2 x i64> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64(
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i64> @intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT: vmadd.vx v8, a0, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64(
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, ma
-; CHECK-NEXT: vmadd.vx v8, a0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 %1,
- <vscale x 4 x i64> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64(
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i64> @intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT: vmadd.vx v8, a0, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i64> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
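+; Note on the scheme above: iXLen is not a real LLVM IR type. The RUN lines
+; rewrite it with sed to i32 or i64 before llc runs, so one test body covers
+; the XLen-sized VL and policy operands on both riscv32 and riscv64. Output
+; common to both targets is matched under the shared CHECK prefix; the RV32
+; and RV64 prefixes are available for any cases where codegen diverges.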
declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 1 x i8> @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 2 x i8> @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 4 x i8> @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 8 x i8> @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 16 x i8> @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 32 x i8> @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 32 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 1 x i16> @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 2 x i16> @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 4 x i16> @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 8 x i16> @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 16 x i16> @intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 16 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 1 x i32> @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 2 x i32> @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 4 x i32> @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 8 x i32> @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 1 x i64> @intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 2 x i64> @intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 4 x i64> @intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i64> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
<vscale x 1 x i8> %0,
i8 %1,
<vscale x 1 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i8> %a
}
i8,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 1 x i8> @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
i8 %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
<vscale x 2 x i8> %0,
i8 %1,
<vscale x 2 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i8> %a
}
i8,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 2 x i8> @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
i8 %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
<vscale x 4 x i8> %0,
i8 %1,
<vscale x 4 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i8> %a
}
i8,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 4 x i8> @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
i8 %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma
<vscale x 8 x i8> %0,
i8 %1,
<vscale x 8 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i8> %a
}
i8,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 8 x i8> @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
i8 %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma
<vscale x 16 x i8> %0,
i8 %1,
<vscale x 16 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 16 x i8> %a
}
i8,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 16 x i8> @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu
i8 %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma
<vscale x 32 x i8> %0,
i8 %1,
<vscale x 32 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 32 x i8> %a
}
i8,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 32 x i8> @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu
i8 %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 32 x i8> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
<vscale x 1 x i16> %0,
i16 %1,
<vscale x 1 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i16> %a
}
i16,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 1 x i16> @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
i16 %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma
<vscale x 2 x i16> %0,
i16 %1,
<vscale x 2 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i16> %a
}
i16,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 2 x i16> @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
i16 %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
<vscale x 4 x i16> %0,
i16 %1,
<vscale x 4 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i16> %a
}
i16,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 4 x i16> @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
i16 %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma
<vscale x 8 x i16> %0,
i16 %1,
<vscale x 8 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i16> %a
}
i16,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 8 x i16> @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
i16 %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma
<vscale x 16 x i16> %0,
i16 %1,
<vscale x 16 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 16 x i16> %a
}
i16,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 16 x i16> @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
i16 %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 16 x i16> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
<vscale x 1 x i32> %0,
i32 %1,
<vscale x 1 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i32> %a
}
i32,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 1 x i32> @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
i32 %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
<vscale x 2 x i32> %0,
i32 %1,
<vscale x 2 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i32> %a
}
i32,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 2 x i32> @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
i32 %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
<vscale x 4 x i32> %0,
i32 %1,
<vscale x 4 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i32> %a
}
i32,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 4 x i32> @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
i32 %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma
<vscale x 8 x i32> %0,
i32 %1,
<vscale x 8 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i32> %a
}
i32,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 8 x i32> @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
i32 %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i32> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, ma
-; CHECK-NEXT: vmadd.vv v8, v10, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma
+; RV32-NEXT: vmadd.vv v8, v10, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT: vmadd.vx v8, a0, v9
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i64> %a
}
i64,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32, i32);
-
-define <vscale x 1 x i64> @intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu
-; CHECK-NEXT: vmadd.vv v8, v10, v9, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen, iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu
+; RV32-NEXT: vmadd.vv v8, v10, v9, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; RV64-NEXT: vmadd.vx v8, a0, v9, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i64>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, ma
-; CHECK-NEXT: vmadd.vv v8, v12, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, ma
+; RV32-NEXT: vmadd.vv v8, v12, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT: vmadd.vx v8, a0, v10
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i64> %a
}
i64,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32, i32);
-
-define <vscale x 2 x i64> @intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu
-; CHECK-NEXT: vmadd.vv v8, v12, v10, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen, iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, mu
+; RV32-NEXT: vmadd.vv v8, v12, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu
+; RV64-NEXT: vmadd.vx v8, a0, v10, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i64>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, ma
-; CHECK-NEXT: vmadd.vv v8, v16, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, ma
+; RV32-NEXT: vmadd.vv v8, v16, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma
+; RV64-NEXT: vmadd.vx v8, a0, v12
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i64> %a
}
i64,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32, i32);
-
-define <vscale x 4 x i64> @intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu
-; CHECK-NEXT: vmadd.vv v8, v16, v12, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen, iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu
+; RV32-NEXT: vmadd.vv v8, v16, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu
+; RV64-NEXT: vmadd.vx v8, a0, v12, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i64> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmax.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i32 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmax.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i32 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmax.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i32 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmax.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i32 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmax.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i32 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmax.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i32 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vmax.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i32 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i32,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8r.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmax.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i32 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmax.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i32 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmax.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i32 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmax.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i32 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmax.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i32 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vmax.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i32 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmax.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmax.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmax.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmax.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vmax.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmax.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i32 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmax.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i32 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmax.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i32 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmax.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i32 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vmax_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vmax_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vmax_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vmax_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vmax_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vmax.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vmax_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vmax.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vmax_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- <vscale x 64 x i1>,
- i32,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vmax.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i8 %2,
- <vscale x 64 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vmax_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vmax_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vmax_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vmax_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vmax.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vmax_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vmax.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vmax_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vmax.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i16 %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vmax_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vmax_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vmax_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vmax.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vmax_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vmax.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vmax_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vmax.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vmax_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vmax.vv v8, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vmax_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vmax.vv v8, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vmax_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmax.vv v8, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vmax_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vmax.vv v8, v8, v16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i64> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
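+; The RUN lines above use sed to substitute the iXLen placeholder with the
+; target's native XLEN integer type (i32 for riscv32, i64 for riscv64) before
+; invoking llc, so a single copy of these tests drives both targets; the
+; shared assertions use the CHECK prefix, with RV32/RV64 reserved for any
+; target-specific codegen differences.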
declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i64);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i64);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i64);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i64);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i64);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i64);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
- i64);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
<vscale x 64 x i8> %1,
<vscale x 64 x i8> %2,
<vscale x 64 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i64);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i64);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i64);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i64);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i64);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
- i64);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re16.v v24, (a0)
<vscale x 32 x i16> %1,
<vscale x 32 x i16> %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i64);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i64);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i64);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i64);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
- i64);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re32.v v24, (a0)
<vscale x 16 x i32> %1,
<vscale x 16 x i32> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i64);
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i64);
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i64);
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
- i64);
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vmax_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmax_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vmax_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmax_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vmax_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmax_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vmax_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmax_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vmax_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmax_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vmax_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmax_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vmax_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmax_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
i8,
<vscale x 64 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
<vscale x 64 x i8> %1,
i8 %2,
<vscale x 64 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vmax_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmax_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vmax_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmax_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vmax_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmax_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vmax_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmax_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vmax_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmax_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vmax_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmax_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
i16,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
<vscale x 32 x i16> %1,
i16 %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vmax_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmax_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vmax_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmax_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vmax_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmax_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vmax_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmax_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vmax_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmax_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
i32,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
<vscale x 16 x i32> %1,
i32 %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vmax_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
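+; i64 scalar operands diverge by XLEN: rv64 passes the scalar in a single
+; GPR and keeps vmax.vx, while rv32 must store the two 32-bit halves to
+; the stack and splat them with a zero-stride vlse64.v, falling back to
+; vmax.vv. Hence the separate RV32/RV64 check prefixes below.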
+define <vscale x 1 x i64> @intrinsic_vmax_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmax_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vmax.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmax_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vmax.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64(
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vmax.vv v8, v9, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vmax.vx v8, v9, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vmax_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vmax_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmax_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vmax.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmax_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vmax.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64(
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vmax.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmax.vv v8, v10, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vmax.vx v8, v10, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vmax_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vmax_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmax_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmax.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmax_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vmax.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64(
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vmax.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vmax.vv v8, v12, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vmax.vx v8, v12, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vmax_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vmax_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmax_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vmax.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmax_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vmax.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64(
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vmax.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vlse64.v v24, (a0), zero
+; RV32-NEXT: vmax.vv v8, v16, v24, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vmax.vx v8, v16, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmaxu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i32 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmaxu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i32 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmaxu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i32 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmaxu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i32 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmaxu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i32 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmaxu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i32 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vmaxu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i32 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i32,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8r.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmaxu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i32 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmaxu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i32 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmaxu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i32 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmaxu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i32 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmaxu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i32 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vmaxu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i32 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmaxu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmaxu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmaxu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmaxu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vmaxu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmaxu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i32 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmaxu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i32 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmaxu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i32 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmaxu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i32 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vmaxu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vmaxu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- <vscale x 64 x i1>,
- i32,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vmaxu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i8 %2,
- <vscale x 64 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vmaxu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vmaxu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vmaxu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i16 %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vmaxu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vmaxu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vmaxu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vmaxu.vv v8, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vmaxu.vv v8, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmaxu.vv v8, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vmaxu.vv v8, v8, v16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i64> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
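+; Note that iXLen is not a real LLVM type: the sed invocations in the RUN
+; lines above rewrite it to the target's native XLEN integer type (i32 for
+; riscv32, i64 for riscv64), so a single test body now exercises both
+; targets in place of the separate per-target copies deleted above.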
declare <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i64);
+ iXLen);
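+; The unmasked intrinsic takes a passthru operand (undef throughout this
+; file), the two sources, and a trailing iXLen vector-length operand that
+; the checks feed to vsetvli.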
-define <vscale x 1 x i8> @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i64);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i64);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i64);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i64);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i64);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
- i64);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
<vscale x 64 x i8> %1,
<vscale x 64 x i8> %2,
<vscale x 64 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
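+; For the m8 cases like nxv64i8 above, v8 and v16 already hold the passthru
+; and the first source, so the second source arrives indirectly in memory
+; and is reloaded with a whole-register load (vl8r.v) before the masked
+; vmaxu.vv.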
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i64);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i64);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i64);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i64);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i64);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
- i64);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re16.v v24, (a0)
<vscale x 32 x i16> %1,
<vscale x 32 x i16> %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i64);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i64);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32(
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i64);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32(
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i64);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32(
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
- i64);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32(
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32(
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
<vscale x 16 x i32> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i64);
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i64);
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64(
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i64);
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64(
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
- i64);
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64(
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8(
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8(
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8(
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8(
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8(
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8(
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8(
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8(
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8(
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8(
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8(
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8(
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8(
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8(
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
<vscale x 64 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8(
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i8 %2,
<vscale x 64 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16(
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16(
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16(
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16(
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16(
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16(
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16(
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i16 %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32(
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32(
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32(
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32(
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32(
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32(
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vmaxu.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vmaxu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64(
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vmaxu.vv v8, v9, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vmaxu.vx v8, v9, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vmaxu.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vmaxu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64(
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vmaxu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmaxu.vv v8, v10, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vmaxu.vx v8, v10, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmaxu.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vmaxu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64(
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vmaxu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vmaxu.vv v8, v12, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vmaxu.vx v8, v12, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vmaxu.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vmaxu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64(
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vmaxu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vlse64.v v24, (a0), zero
+; RV32-NEXT: vmaxu.vv v8, v16, v24, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vmaxu.vx v8, v16, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmin.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmin.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmin.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmin.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmin.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmin.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vmin.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i64 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i64,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8r.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmin.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmin.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmin.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmin.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmin.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vmin.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmin.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmin.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmin.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmin.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vmin.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmin.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmin.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmin.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmin.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vmin_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vmin_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vmin_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vmin_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vmin_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vmin.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vmin_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vmin.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vmin_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- <vscale x 64 x i1>,
- i64,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vmin.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i8 %2,
- <vscale x 64 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vmin_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vmin_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vmin_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vmin_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vmin.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vmin_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vmin.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vmin_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vmin.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i16 %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vmin_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vmin_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vmin_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vmin.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vmin_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vmin.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vmin_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vmin.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vmin_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vmin_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vmin.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vmin_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vmin.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vmin_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vmin.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
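+; Note on the scheme below (illustration only, not part of the generated
+; checks): scalar VL and policy operands are written with the placeholder
+; type iXLen, and each RUN line rewrites it with sed to the target's XLEN
+; integer type before the IR reaches llc. For example, on riscv64 a trailing
+; "iXLen" operand is compiled as if it were "i64", while FileCheck still
+; matches the unmodified source file, so one test drives both targets, with
+; RV32/RV64 prefixes reserved for any target-specific codegen differences.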
declare <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i32);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i32);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i32);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i32);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i32);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i32);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
- i32);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
<vscale x 64 x i8> %1,
<vscale x 64 x i8> %2,
<vscale x 64 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i32);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i32);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i32);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i32);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i32);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
- i32);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re16.v v24, (a0)
<vscale x 32 x i16> %1,
<vscale x 32 x i16> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i32);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i32);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i32);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i32);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
- i32);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re32.v v24, (a0)
<vscale x 16 x i32> %1,
<vscale x 16 x i32> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i32);
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i32);
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i32);
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
- i32);
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
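; The .vx tests below splat a scalar from a GPR: the scalar is passed in
; a0 and the AVL in a1, hence "vsetvli zero, a1, ..." in these checks.
; The x0/"zero" destination simply discards the granted vl, which is not
; needed in a scalar register here.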
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vmin_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmin_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vmin_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmin_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vmin_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmin_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vmin_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmin_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vmin_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmin_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vmin_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmin_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vmin_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmin_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
i8,
<vscale x 64 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
<vscale x 64 x i8> %1,
i8 %2,
<vscale x 64 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vmin_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmin_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vmin_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmin_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vmin_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmin_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vmin_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmin_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vmin_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmin_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vmin_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmin_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
i16,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
<vscale x 32 x i16> %1,
i16 %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vmin_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmin_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vmin_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmin_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vmin_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmin_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vmin_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmin_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vmin_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmin_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
i32,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
<vscale x 16 x i32> %1,
i32 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vmin_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vmin.vv v8, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
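+; The i64 .vx cases are the one place the two targets diverge, so the
+; checks split into RV32/RV64 prefixes: RV64 holds the i64 scalar in a0
+; and emits vmin.vx directly, while RV32 must spill the two 32-bit halves
+; to the stack and splat them with a zero-stride vlse64.v, then fall back
+; to vmin.vv.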
+define <vscale x 1 x i64> @intrinsic_vmin_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmin_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vmin.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmin_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vmin.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64(
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vmin.vv v8, v9, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vmin.vx v8, v9, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vmin_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vmin.vv v8, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vmin_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmin_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vmin.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmin_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vmin.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64(
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmin.vv v8, v10, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vmin.vx v8, v10, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vmin_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmin.vv v8, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vmin_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmin_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmin.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmin_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vmin.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64(
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vmin.vv v8, v12, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vmin.vx v8, v12, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vmin_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vmin.vv v8, v8, v16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vmin_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmin_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vmin.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmin_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vmin.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64(
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vlse64.v v24, (a0), zero
+; RV32-NEXT: vmin.vv v8, v16, v24, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vmin.vx v8, v16, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vminu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vminu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vminu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vminu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vminu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vminu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vminu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i64 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i64,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8r.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vminu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vminu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vminu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vminu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vminu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vminu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vminu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vminu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vminu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vminu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vminu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vminu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vminu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vminu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vminu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vminu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vminu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vminu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vminu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vminu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vminu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vminu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vminu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vminu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- <vscale x 64 x i1>,
- i64,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vminu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i8 %2,
- <vscale x 64 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vminu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vminu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vminu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vminu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vminu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vminu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vminu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vminu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vminu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i16 %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vminu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vminu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vminu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vminu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vminu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vminu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vminu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vminu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vminu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vminu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vminu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vminu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vminu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vminu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vminu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
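+;
+; iXLen is a placeholder for the XLEN-sized scalar type: the sed invocations
+; above rewrite it to i32 for the riscv32 run line and to i64 for the riscv64
+; run line, so a single source file covers both targets. Assembly that is
+; identical on both targets matches the shared CHECK prefix; output that
+; diverges (the i64 scalar cases below) uses the RV32 and RV64 prefixes.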
declare <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i32);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i32);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i32);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i32);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i32);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i32);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
- i32);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
<vscale x 64 x i8> %1,
<vscale x 64 x i8> %2,
<vscale x 64 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i32);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i32);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i32);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i32);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i32);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
- i32);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re16.v v24, (a0)
<vscale x 32 x i16> %1,
<vscale x 32 x i16> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i32);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i32);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i32);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i32);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
- i32);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re32.v v24, (a0)
<vscale x 16 x i32> %1,
<vscale x 16 x i32> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i32);
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i32);
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i32);
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
- i32);
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vminu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vminu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vminu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vminu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vminu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vminu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vminu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vminu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vminu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vminu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vminu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vminu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vminu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vminu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
i8,
<vscale x 64 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
<vscale x 64 x i8> %1,
i8 %2,
<vscale x 64 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vminu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vminu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vminu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vminu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vminu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vminu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vminu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vminu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vminu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vminu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vminu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vminu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
i16,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
<vscale x 32 x i16> %1,
i16 %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vminu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vminu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vminu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vminu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vminu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vminu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vminu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vminu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vminu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vminu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
i32,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
<vscale x 16 x i32> %1,
i32 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vminu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vminu.vv v8, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
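+; An i64 scalar operand does not fit in a single RV32 GPR, so on RV32 the two
+; halves are stored to the stack and splatted into a vector with a zero-stride
+; vlse64.v, followed by a vector-vector vminu.vv; RV64 passes the scalar in
+; one register and uses vminu.vx directly.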
+define <vscale x 1 x i64> @intrinsic_vminu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vminu_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vminu.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vminu_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vminu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64(
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vminu.vv v8, v9, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vminu.vx v8, v9, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vminu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vminu.vv v8, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vminu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vminu_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vminu.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vminu_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vminu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64(
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vminu.vv v8, v10, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vminu.vx v8, v10, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vminu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vminu.vv v8, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vminu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vminu_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vminu.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vminu_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vminu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64(
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vminu.vv v8, v12, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vminu.vx v8, v12, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vminu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vminu.vv v8, v8, v16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vminu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vminu_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vminu.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vminu_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vminu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64(
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vlse64.v v24, (a0), zero
+; RV32-NEXT: vminu.vv v8, v16, v24, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vminu.vx v8, v16, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmsbc.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
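; vmsbc.vv subtracts the two vector operands and writes the borrow-out of
; each element-wise subtraction into a mask register (v0 here), so the result
; is a <vscale x N x i1> mask rather than a vector of the source element type.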
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmsbc.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmsbc.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmsbc.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmsbc.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmsbc.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i64);
-
-define <vscale x 64 x i1> @intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vmsbc.vv v0, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i64 %2)
-
- ret <vscale x 64 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmsbc.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmsbc.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmsbc.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmsbc.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmsbc.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vmsbc.vv v0, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmsbc.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmsbc.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmsbc.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmsbc.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vmsbc.vv v0, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmsbc.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmsbc.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmsbc.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmsbc.vv v0, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8(
- <vscale x 1 x i8>,
- i8,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8(
- <vscale x 2 x i8>,
- i8,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8(
- <vscale x 4 x i8>,
- i8,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8(
- <vscale x 8 x i8>,
- i8,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8(
- <vscale x 16 x i8>,
- i8,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8(
- <vscale x 32 x i8>,
- i8,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8(
- <vscale x 64 x i8>,
- i8,
- i64);
-
-define <vscale x 64 x i1> @intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 64 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16(
- <vscale x 1 x i16>,
- i16,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16(
- <vscale x 2 x i16>,
- i16,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16(
- <vscale x 4 x i16>,
- i16,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16(
- <vscale x 8 x i16>,
- i16,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16(
- <vscale x 16 x i16>,
- i16,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16(
- <vscale x 32 x i16>,
- i16,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32(
- <vscale x 1 x i32>,
- i32,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32(
- <vscale x 2 x i32>,
- i32,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32(
- <vscale x 4 x i32>,
- i32,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32(
- <vscale x 8 x i32>,
- i32,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32(
- <vscale x 16 x i32>,
- i32,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64(
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64(
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64(
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64(
- <vscale x 8 x i64>,
- i64,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
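; The .vvm form also consumes a borrow-in mask in v0. Because the result must
; not clobber v0 while it is still serving as the borrow-in source, the mask
; is computed into a temporary register (v10 here) and then copied back into
; v0 with vmv1r.v.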
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmsbc.vvm v12, v8, v10, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmsbc.vvm v16, v8, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i1> %2,
- i64 %3)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i64);
-
-define <vscale x 64 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vmsbc.vvm v24, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i1> %2,
- i64 %3)
-
- ret <vscale x 64 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmsbc.vvm v12, v8, v10, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmsbc.vvm v16, v8, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vmsbc.vvm v24, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i1> %2,
- i64 %3)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmsbc.vvm v12, v8, v10, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmsbc.vvm v16, v8, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vmsbc.vvm v24, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmsbc.vvm v12, v8, v10, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmsbc.vvm v16, v8, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmsbc.vvm v24, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8(
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8(
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8(
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8(
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0
-; CHECK-NEXT: vmv.v.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8(
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vmsbc.vxm v10, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 %1,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8(
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vmsbc.vxm v12, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 %1,
- <vscale x 32 x i1> %2,
- i64 %3)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8(
- <vscale x 64 x i8>,
- i8,
- <vscale x 64 x i1>,
- i64);
-
-define <vscale x 64 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vmsbc.vxm v16, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- i8 %1,
- <vscale x 64 x i1> %2,
- i64 %3)
-
- ret <vscale x 64 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16(
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16(
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16(
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0
-; CHECK-NEXT: vmv.v.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16(
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vmsbc.vxm v10, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16(
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vmsbc.vxm v12, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 %1,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16(
- <vscale x 32 x i16>,
- i16,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vmsbc.vxm v16, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- i16 %1,
- <vscale x 32 x i1> %2,
- i64 %3)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32(
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32(
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0
-; CHECK-NEXT: vmv.v.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32(
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vmsbc.vxm v10, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32(
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vmsbc.vxm v12, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32(
- <vscale x 16 x i32>,
- i32,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vmsbc.vxm v16, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- i32 %1,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64(
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0
-; CHECK-NEXT: vmv.v.v v0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64(
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vmsbc.vxm v10, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64(
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vmsbc.vxm v12, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64(
- <vscale x 8 x i64>,
- i64,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vmsbc.vxm v16, v8, a0, v0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- i64 %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
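+; iXLen is a placeholder for the target's XLEN integer type: the sed pipelines
+; above rewrite it to i32 for riscv32 and i64 for riscv64, so a single test
+; source drives both targets, with shared CHECK lines plus RV32/RV64-specific
+; prefixes where codegen diverges.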
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 32 x i1> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i1>,
- i32);
+ iXLen);
-define <vscale x 64 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
<vscale x 64 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 64 x i1> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
<vscale x 32 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 32 x i1> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
<vscale x 1 x i8> %0,
i8 %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
<vscale x 2 x i8> %0,
i8 %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
<vscale x 4 x i8> %0,
i8 %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
<vscale x 8 x i8> %0,
i8 %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
<vscale x 16 x i8> %0,
i8 %1,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
<vscale x 32 x i8> %0,
i8 %1,
<vscale x 32 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 32 x i1> %a
}
<vscale x 64 x i8>,
i8,
<vscale x 64 x i1>,
- i32);
+ iXLen);
-define <vscale x 64 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
<vscale x 64 x i8> %0,
i8 %1,
<vscale x 64 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 64 x i1> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
<vscale x 1 x i16> %0,
i16 %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
<vscale x 2 x i16> %0,
i16 %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
<vscale x 4 x i16> %0,
i16 %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
<vscale x 8 x i16> %0,
i16 %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
<vscale x 16 x i16> %0,
i16 %1,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
<vscale x 32 x i16>,
i16,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
<vscale x 32 x i16> %0,
i16 %1,
<vscale x 32 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 32 x i1> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
<vscale x 1 x i32> %0,
i32 %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
<vscale x 2 x i32> %0,
i32 %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
<vscale x 4 x i32> %0,
i32 %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
<vscale x 8 x i32> %0,
i32 %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
<vscale x 16 x i32>,
i32,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
<vscale x 16 x i32> %0,
i32 %1,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vmsbc.vvm v9, v8, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
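+; On RV32 the i64 scalar operand does not fit in a single GPR, so it is stored
+; to the stack and splatted with a strided load (vlse64.v), turning vmsbc.vxm
+; into vmsbc.vvm; RV64 passes the scalar directly to vmsbc.vxm. Hence the
+; separate RV32/RV64 check prefixes for the i64 cases below.
+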
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vmsbc.vvm v9, v8, v10, v0
+; RV32-NEXT: vmv.v.v v0, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vmsbc.vxm v9, v8, a0, v0
+; RV64-NEXT: vmv.v.v v0, v9
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmsbc.vvm v10, v8, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmsbc.vvm v10, v8, v12, v0
+; RV32-NEXT: vmv1r.v v0, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vmsbc.vxm v10, v8, a0, v0
+; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vmsbc.vvm v12, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vmsbc.vvm v12, v8, v16, v0
+; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vmsbc.vxm v12, v8, a0, v0
+; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vmsbc.vvm v16, v8, v24, v0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a0), zero
+; RV32-NEXT: vmsbc.vvm v16, v8, v24, v0
+; RV32-NEXT: vmv1r.v v0, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vmsbc.vxm v16, v8, a0, v0
+; RV64-NEXT: vmv1r.v v0, v16
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
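+; As above, sed rewrites iXLen to i32 or i64 so this file also covers both
+; riscv32 and riscv64 from one source.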
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8(
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8(
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8(
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8(
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
declare <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8(
<vscale x 64 x i8>,
<vscale x 64 x i8>,
- i32);
+ iXLen);
-define <vscale x 64 x i1> @intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
%a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8(
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 64 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16(
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32(
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmsbc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmsbc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmsbc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmsbc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmsbc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
declare <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8(
<vscale x 64 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 64 x i1> @intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmsbc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8(
<vscale x 64 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 64 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vmsbc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vmsbc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vmsbc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vmsbc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vmsbc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16(
<vscale x 32 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vmsbc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16(
<vscale x 32 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vmsbc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vmsbc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vmsbc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vmsbc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32(
<vscale x 16 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmsbc.vx v0, v8, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32(
<vscale x 16 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vmsbc.vv v0, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vmsbc.vv v0, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vmsbc.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vmsbc.vv v0, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vmsbc.vv v0, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vmsbc.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmsbc.vv v0, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmsbc.vv v0, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vmsbc.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vmsbc.vv v0, v8, v16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vmsbc.vv v0, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vmsbc.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmseq.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8(
- <vscale x 1 x i1>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmseq.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i8> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmseq.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8(
- <vscale x 2 x i1>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmseq.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i8> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmseq.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8(
- <vscale x 4 x i1>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmseq.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i8> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmseq.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8(
- <vscale x 8 x i1>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmseq.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i8> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmseq_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmseq.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8(
- <vscale x 16 x i1>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmseq.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- i64 %4)
- %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i8> %3,
- <vscale x 16 x i1> %mask,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmseq_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmseq.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8(
- <vscale x 32 x i1>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmseq.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- i64 %4)
- %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i8> %3,
- <vscale x 32 x i1> %mask,
- i64 %4)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmseq.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16(
- <vscale x 1 x i1>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmseq.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i16> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmseq.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16(
- <vscale x 2 x i1>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmseq.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i16> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmseq.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16(
- <vscale x 4 x i1>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmseq.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i16> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmseq.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16(
- <vscale x 8 x i1>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmseq.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i16> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmseq_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmseq.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16(
- <vscale x 16 x i1>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmseq.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- i64 %4)
- %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i16> %3,
- <vscale x 16 x i1> %mask,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmseq.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32(
- <vscale x 1 x i1>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmseq.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i32> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmseq.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32(
- <vscale x 2 x i1>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmseq.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i32> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmseq.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32(
- <vscale x 4 x i1>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmseq.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i32> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmseq.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32(
- <vscale x 8 x i1>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmseq.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i32> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmseq.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(
- <vscale x 1 x i1>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmseq.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i64> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmseq.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64(
- <vscale x 2 x i1>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmseq.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i64> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmseq.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64(
- <vscale x 4 x i1>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmseq.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i64> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
- <vscale x 1 x i8>,
- i8,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vmseq.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
- <vscale x 1 x i1>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
- <vscale x 2 x i8>,
- i8,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vmseq.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
- <vscale x 2 x i1>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
- <vscale x 4 x i8>,
- i8,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vmseq.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
- <vscale x 4 x i1>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
- <vscale x 8 x i8>,
- i8,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vmseq.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
- <vscale x 8 x i1>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
- <vscale x 16 x i8>,
- i8,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmseq_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vmseq.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
- <vscale x 16 x i1>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmseq.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
- <vscale x 32 x i8>,
- i8,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmseq_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vmseq.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
- <vscale x 32 x i1>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmseq_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmseq.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
- <vscale x 1 x i16>,
- i16,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vmseq.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
- <vscale x 1 x i1>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
- <vscale x 2 x i16>,
- i16,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vmseq.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
- <vscale x 2 x i1>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
- <vscale x 4 x i16>,
- i16,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vmseq.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
- <vscale x 4 x i1>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
- <vscale x 8 x i16>,
- i16,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vmseq.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
- <vscale x 8 x i1>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmseq.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
- <vscale x 16 x i16>,
- i16,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmseq_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vmseq.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
- <vscale x 16 x i1>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmseq.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
- <vscale x 1 x i32>,
- i32,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vmseq.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
- <vscale x 1 x i1>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
- <vscale x 2 x i32>,
- i32,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vmseq.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
- <vscale x 2 x i1>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
- <vscale x 4 x i32>,
- i32,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vmseq.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
- <vscale x 4 x i1>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmseq.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
- <vscale x 8 x i32>,
- i32,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vmseq.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
- <vscale x 8 x i1>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmseq.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vmseq.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
- <vscale x 1 x i1>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vmseq.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
- <vscale x 2 x i1>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmseq.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vmseq.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
- <vscale x 4 x i1>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmseq.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmseq.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %1,
- i8 9,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmseq.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %1,
- i8 9,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmseq.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %1,
- i8 9,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmseq.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %1,
- i8 9,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmseq_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmseq.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmseq.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %1,
- i8 9,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmseq_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmseq.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmseq_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmseq.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %1,
- i8 9,
- <vscale x 32 x i1> %2,
- i64 %3)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmseq.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %1,
- i16 9,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmseq.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %1,
- i16 9,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmseq.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %1,
- i16 9,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmseq.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmseq.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %1,
- i16 9,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmseq_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmseq.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmseq.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %1,
- i16 9,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmseq.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %1,
- i32 9,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmseq.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %1,
- i32 9,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmseq.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmseq.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %1,
- i32 9,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmseq.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmseq.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %1,
- i32 9,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmseq.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %1,
- i64 9,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmseq.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmseq.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %1,
- i64 9,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmseq.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmseq.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %1,
- i64 9,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
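+;
+; How the run lines above work: sed rewrites every iXLen token to i32 or i64
+; before llc sees the file, so under the RV32 run line the first declaration
+; below is compiled as, for example:
+;   declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
+;     <vscale x 1 x i8>, <vscale x 1 x i8>, i32)
+; One source file thus drives both targets; CHECK lines are shared, and the
+; RV32/RV64 prefixes are reserved for output that differs by XLEN.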
declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8(
<vscale x 1 x i1> %0,
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
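+;
+; Each masked test first materializes %mask with an unmasked vmseq and then
+; feeds it to the .mask intrinsic, so the CHECK lines expect the mu
+; (mask-undisturbed) policy in the vsetvli for the masked compare.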
declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8(
<vscale x 2 x i1> %0,
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8(
<vscale x 4 x i1> %0,
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8(
<vscale x 8 x i1> %0,
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmseq_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
%mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8(
<vscale x 16 x i1> %0,
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmseq_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmseq_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
%mask = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8(
<vscale x 32 x i1> %0,
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16(
<vscale x 1 x i1> %0,
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16(
<vscale x 2 x i1> %0,
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16(
<vscale x 4 x i1> %0,
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16(
<vscale x 8 x i1> %0,
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmseq_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
%mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16(
<vscale x 16 x i1> %0,
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32(
<vscale x 1 x i1> %0,
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32(
<vscale x 2 x i1> %0,
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32(
<vscale x 4 x i1> %0,
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32(
<vscale x 8 x i1> %0,
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
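+; Vector-scalar (vx) forms: the comparand now arrives in a GPR, so the VL
+; moves from a0 to a1 in the vsetvli checks below. For SEW of 8/16/32 the
+; scalar fits one register on both targets and the assertions stay shared.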
declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmseq_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmseq_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmseq_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmseq_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmseq_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmseq_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vmseq.vv v0, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
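+; The i64 vx cases diverge per target: RV32 has no 64-bit GPR, so the scalar
+; pair in a0/a1 is spilled to the stack and splatted with a zero-stride
+; vlse64.v, turning the compare into a vector-vector vmseq.vv; RV64 keeps
+; the direct vmseq.vx against a0.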
+define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmseq_vx_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vmseq.vv v0, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmseq_vx_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vmseq.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v11, (a0), zero
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmseq.vv v10, v8, v11, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v11, (a0), zero
+; RV32-NEXT: vmv1r.v v10, v0
+; RV32-NEXT: vmv1r.v v0, v9
+; RV32-NEXT: vmseq.vv v10, v8, v11, v0.t
+; RV32-NEXT: vmv.v.v v0, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v10, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vmseq.vx v10, v8, a0, v0.t
+; RV64-NEXT: vmv.v.v v0, v10
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vmseq.vv v0, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmseq_vx_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vmseq.vv v0, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmseq_vx_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vmseq.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmseq.vv v11, v8, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmv1r.v v11, v0
+; RV32-NEXT: vmv1r.v v0, v10
+; RV32-NEXT: vmseq.vv v11, v8, v12, v0.t
+; RV32-NEXT: vmv1r.v v0, v11
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v11, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vmseq.vx v11, v8, a0, v0.t
+; RV64-NEXT: vmv1r.v v0, v11
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmseq.vv v0, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmseq_vx_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmseq.vv v0, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmseq_vx_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vmseq.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmseq.vv v13, v8, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vmv1r.v v13, v0
+; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: vmseq.vv v13, v8, v16, v0.t
+; RV32-NEXT: vmv1r.v v0, v13
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v13, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vmseq.vx v13, v8, a0, v0.t
+; RV64-NEXT: vmv1r.v v0, v13
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
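+; Vector-immediate (vi) forms: the comparand is a small immediate (9 here)
+; encoded in the instruction itself, so no GPR is needed and even the i64
+; cases share a single CHECK block across RV32 and RV64.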
-define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i8> %1,
i8 9,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i8> %1,
i8 9,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i8> %1,
i8 9,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 8 x i8> %1,
i8 9,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmseq_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 16 x i8> %1,
i8 9,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmseq_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmseq_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 32 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmseq_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmseq_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 32 x i8> %1,
i8 9,
<vscale x 32 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 32 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i16> %1,
i16 9,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i16> %1,
i16 9,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i16> %1,
i16 9,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 8 x i16> %1,
i16 9,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmseq_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 16 x i16> %1,
i16 9,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i32> %1,
i32 9,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 4 x i32> %1,
i32 9,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 8 x i32> %1,
i32 9,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8(
- <vscale x 1 x i1>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i8> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8(
- <vscale x 2 x i1>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i8> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8(
- <vscale x 4 x i1>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i8> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8(
- <vscale x 8 x i1>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i8> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsge_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8(
- <vscale x 16 x i1>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- i64 %4)
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i8> %3,
- <vscale x 16 x i1> %mask,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsge_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8(
- <vscale x 32 x i1>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- i64 %4)
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i8> %3,
- <vscale x 32 x i1> %mask,
- i64 %4)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16(
- <vscale x 1 x i1>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i16> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16(
- <vscale x 2 x i1>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i16> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16(
- <vscale x 4 x i1>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i16> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16(
- <vscale x 8 x i1>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i16> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsge_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16(
- <vscale x 16 x i1>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- i64 %4)
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i16> %3,
- <vscale x 16 x i1> %mask,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32(
- <vscale x 1 x i1>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i32> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32(
- <vscale x 2 x i1>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i32> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32(
- <vscale x 4 x i1>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i32> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32(
- <vscale x 8 x i1>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i32> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64(
- <vscale x 1 x i1>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i64> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64(
- <vscale x 2 x i1>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i64> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64(
- <vscale x 4 x i1>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i64> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
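-; There is likewise no vmsge.vx encoding. The unmasked form expands to
-; vmslt.vx followed by vmnot.m; the masked form expands to a masked
-; vmslt.vx followed by vmxor.mm against the mask operand, as the CHECK
-; lines below show.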
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
- <vscale x 1 x i8>,
- i8,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnot.m v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
- <vscale x 1 x i1>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v10, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
- <vscale x 2 x i8>,
- i8,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnot.m v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
- <vscale x 2 x i1>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v10, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
- <vscale x 4 x i8>,
- i8,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnot.m v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
- <vscale x 4 x i1>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v10, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
- <vscale x 8 x i8>,
- i8,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnot.m v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
- <vscale x 8 x i1>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v10, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
- <vscale x 16 x i8>,
- i8,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmnot.m v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
- <vscale x 16 x i1>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v11, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
- <vscale x 32 x i8>,
- i8,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsge_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmnot.m v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
- <vscale x 32 x i1>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsge_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v13, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
- <vscale x 1 x i16>,
- i16,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnot.m v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
- <vscale x 1 x i1>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v10, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
- <vscale x 2 x i16>,
- i16,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnot.m v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
- <vscale x 2 x i1>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v10, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
- <vscale x 4 x i16>,
- i16,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnot.m v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
- <vscale x 4 x i1>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v10, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
- <vscale x 8 x i16>,
- i16,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmnot.m v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
- <vscale x 8 x i1>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v11, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
- <vscale x 16 x i16>,
- i16,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmnot.m v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
- <vscale x 16 x i1>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v13, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
- <vscale x 1 x i32>,
- i32,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnot.m v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
- <vscale x 1 x i1>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v10, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
- <vscale x 2 x i32>,
- i32,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnot.m v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
- <vscale x 2 x i1>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v10, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
- <vscale x 4 x i32>,
- i32,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmnot.m v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
- <vscale x 4 x i1>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v11, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
- <vscale x 8 x i32>,
- i32,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmnot.m v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
- <vscale x 8 x i1>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v13, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnot.m v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
- <vscale x 1 x i1>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v10, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmnot.m v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
- <vscale x 2 x i1>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v11, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmnot.m v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
- <vscale x 4 x i1>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v13, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
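-; vmsge.vi with immediate N is selected as vmsgt.vi with N - 1
-; (a >= N <=> a > N - 1), so each CHECK immediate below is one less than
-; the immediate passed to the intrinsic.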
-define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, -16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 -15,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vi v10, v8, -15, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %1,
- i8 -14,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, -14
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 -13,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vi v10, v8, -13, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %1,
- i8 -12,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, -12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 -11,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vi v10, v8, -11, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %1,
- i8 -10,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, -10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 -9,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vi v10, v8, -9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %1,
- i8 -8,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsge_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, -8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 -7,
- i64 %1)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgt.vi v11, v8, -7, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %1,
- i8 -6,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsge_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, -6
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 -5,
- i64 %1)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsge_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgt.vi v13, v8, -5, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %1,
- i8 -4,
- <vscale x 32 x i1> %2,
- i64 %3)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, -4
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 -3,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vi v10, v8, -3, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %1,
- i16 -2,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, -2
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 -1,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vi v10, v8, -1, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %1,
- i16 0,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, -1
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 0,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vi v10, v8, 0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %1,
- i16 1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 1
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 2,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgt.vi v11, v8, 2, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %1,
- i16 3,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsge_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 3
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 4,
- i64 %1)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgt.vi v13, v8, 4, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %1,
- i16 5,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 5
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 6,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vi v10, v8, 6, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %1,
- i32 7,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 7
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 8,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vi v10, v8, 8, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %1,
- i32 9,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 10,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgt.vi v11, v8, 10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %1,
- i32 11,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 12,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgt.vi v13, v8, 12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %1,
- i32 13,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 14,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vi v10, v8, 14, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %1,
- i64 15,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 15
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 16,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgt.vi v11, v8, -16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %1,
- i64 -15,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, -15
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 -14,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgt.vi v13, v8, -14, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %1,
- i64 -13,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-; Test cases where the mask and maskedoff are the same value.
-define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %0,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %0,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %0,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %0,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %0,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %0,
- i64 %3)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %0,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %0,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %0,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %0,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %0,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %0,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %0,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %0,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %0,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %0,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %0,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %0,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
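+; The iXLen placeholder stands in for the XLen-sized VL operand so a single
+; file can cover both targets: the sed commands above rewrite iXLen to i32 for
+; riscv32 and to i64 for riscv64 before the IR reaches llc.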
declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8(
<vscale x 1 x i1> %0,
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8(
<vscale x 2 x i1> %0,
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8(
<vscale x 4 x i1> %0,
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8(
<vscale x 8 x i1> %0,
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsge_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8(
<vscale x 16 x i1> %0,
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsge_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsge_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8(
<vscale x 32 x i1> %0,
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16(
<vscale x 1 x i1> %0,
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16(
<vscale x 2 x i1> %0,
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16(
<vscale x 4 x i1> %0,
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16(
<vscale x 8 x i1> %0,
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsge_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16(
<vscale x 16 x i1> %0,
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32(
<vscale x 1 x i1> %0,
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32(
<vscale x 2 x i1> %0,
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32(
<vscale x 4 x i1> %0,
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32(
<vscale x 8 x i1> %0,
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsge_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsge_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsge_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsge_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsge_vx_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vmsle.vv v0, v9, v8
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsge_vx_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vmslt.vx v8, v8, a0
+; RV64-NEXT: vmnot.m v0, v8
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v11, (a0), zero
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vv v10, v11, v8, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v11, (a0), zero
+; RV32-NEXT: vmv1r.v v10, v0
+; RV32-NEXT: vmv1r.v v0, v9
+; RV32-NEXT: vmsle.vv v10, v11, v8, v0.t
+; RV32-NEXT: vmv.v.v v0, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v10, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vmslt.vx v10, v8, a0, v0.t
+; RV64-NEXT: vmxor.mm v0, v10, v9
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsge_vx_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vmsle.vv v0, v10, v8
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsge_vx_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vmslt.vx v10, v8, a0
+; RV64-NEXT: vmnot.m v0, v10
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsle.vv v11, v12, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmv1r.v v11, v0
+; RV32-NEXT: vmv1r.v v0, v10
+; RV32-NEXT: vmsle.vv v11, v12, v8, v0.t
+; RV32-NEXT: vmv1r.v v0, v11
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v11, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vmslt.vx v11, v8, a0, v0.t
+; RV64-NEXT: vmxor.mm v0, v11, v10
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsge_vx_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmsle.vv v0, v12, v8
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsge_vx_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vmslt.vx v12, v8, a0
+; RV64-NEXT: vmnot.m v0, v12
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsle.vv v13, v16, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vmv1r.v v13, v0
+; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: vmsle.vv v13, v16, v8, v0.t
+; RV32-NEXT: vmv1r.v v0, v13
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v13, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vmslt.vx v13, v8, a0, v0.t
+; RV64-NEXT: vmxor.mm v0, v13, v12
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 -15,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i8> %1,
i8 -14,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 -13,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i8> %1,
i8 -12,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 -11,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i8> %1,
i8 -10,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 -9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 8 x i8> %1,
i8 -8,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsge_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 -7,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 16 x i8> %1,
i8 -6,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmsge_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsge_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 -5,
- i32 %1)
+ iXLen %1)
ret <vscale x 32 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmsge_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsge_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 32 x i8> %1,
i8 -4,
<vscale x 32 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 32 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 -3,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i16> %1,
i16 -2,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 -1,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i16> %1,
i16 0,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 0,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i16> %1,
i16 1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 2,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 8 x i16> %1,
i16 3,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsge_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 4,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 16 x i16> %1,
i16 5,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 6,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i32> %1,
i32 7,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 8,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 10,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 4 x i32> %1,
i32 11,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 12,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 8 x i32> %1,
i32 13,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
; Test cases where the mask and maskedoff are the same value.
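; (Note: vmsge has no native encoding and is synthesized from vmslt. With
; maskedoff equal to the mask m, the result m ? (vs2 >= rs1) : m reduces to
; m & ~(vs2 < rs1), so the RV64 checks below expect vmslt.vx followed by a
; single vmandn.mm into v0, without the vmv1r.v mask copies seen in the
; general masked tests above.)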
-define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 32 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vmsle.vv v0, v9, v8, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vmslt.vx v8, v8, a0
+; RV64-NEXT: vmandn.mm v0, v0, v8
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vmsle.vv v10, v12, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmv1r.v v10, v0
+; RV32-NEXT: vmsle.vv v10, v12, v8, v0.t
+; RV32-NEXT: vmv1r.v v0, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vmslt.vx v10, v8, a0
+; RV64-NEXT: vmandn.mm v0, v0, v10
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: vmsle.vv v12, v16, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vmv1r.v v12, v0
+; RV32-NEXT: vmsle.vv v12, v16, v8, v0.t
+; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vmslt.vx v12, v8, a0
+; RV64-NEXT: vmandn.mm v0, v0, v12
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8(
- <vscale x 1 x i1>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsleu.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i8> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8(
- <vscale x 2 x i1>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsleu.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i8> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8(
- <vscale x 4 x i1>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsleu.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i8> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8(
- <vscale x 8 x i1>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i8> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v10, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8(
- <vscale x 16 x i1>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- i64 %4)
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i8> %3,
- <vscale x 16 x i1> %mask,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgeu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v12, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8(
- <vscale x 32 x i1>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- i64 %4)
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i8> %3,
- <vscale x 32 x i1> %mask,
- i64 %4)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16(
- <vscale x 1 x i1>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsleu.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i16> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16(
- <vscale x 2 x i1>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsleu.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i16> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16(
- <vscale x 4 x i1>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i16> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v10, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16(
- <vscale x 8 x i1>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i16> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v12, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16(
- <vscale x 16 x i1>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- i64 %4)
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i16> %3,
- <vscale x 16 x i1> %mask,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32(
- <vscale x 1 x i1>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsleu.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i32> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32(
- <vscale x 2 x i1>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i32> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v10, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32(
- <vscale x 4 x i1>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i32> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v12, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32(
- <vscale x 8 x i1>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i32> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64(
- <vscale x 1 x i1>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i64> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v10, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64(
- <vscale x 2 x i1>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i64> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v12, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64(
- <vscale x 4 x i1>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i64> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
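-; Note: vmsgeu.vx likewise has no direct encoding. Per the CHECK lines, the
-; unmasked form is synthesized as vmsltu.vx followed by vmnot.m, and the
-; masked form as a masked vmsltu.vx merging into the maskedoff value,
-; followed by vmxor.mm with the mask.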
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
- <vscale x 1 x i8>,
- i8,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnot.m v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
- <vscale x 1 x i1>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v10, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
- <vscale x 2 x i8>,
- i8,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnot.m v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
- <vscale x 2 x i1>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v10, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
- <vscale x 4 x i8>,
- i8,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnot.m v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
- <vscale x 4 x i1>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v10, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
- <vscale x 8 x i8>,
- i8,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnot.m v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
- <vscale x 8 x i1>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v10, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
- <vscale x 16 x i8>,
- i8,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmnot.m v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
- <vscale x 16 x i1>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v11, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
- <vscale x 32 x i8>,
- i8,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgeu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmnot.m v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
- <vscale x 32 x i1>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v13, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
- <vscale x 1 x i16>,
- i16,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnot.m v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
- <vscale x 1 x i1>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v10, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
- <vscale x 2 x i16>,
- i16,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnot.m v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
- <vscale x 2 x i1>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v10, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
- <vscale x 4 x i16>,
- i16,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnot.m v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
- <vscale x 4 x i1>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v10, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
- <vscale x 8 x i16>,
- i16,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmnot.m v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
- <vscale x 8 x i1>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v11, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
- <vscale x 16 x i16>,
- i16,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmnot.m v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
- <vscale x 16 x i1>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v13, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
- <vscale x 1 x i32>,
- i32,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnot.m v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
- <vscale x 1 x i1>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v10, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
- <vscale x 2 x i32>,
- i32,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnot.m v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
- <vscale x 2 x i1>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v10, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
- <vscale x 4 x i32>,
- i32,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmnot.m v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
- <vscale x 4 x i1>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v11, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
- <vscale x 8 x i32>,
- i32,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmnot.m v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
- <vscale x 8 x i1>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v13, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnot.m v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
- <vscale x 1 x i1>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v10, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmnot.m v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
- <vscale x 2 x i1>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v11, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmnot.m v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
- <vscale x 4 x i1>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmxor.mm v0, v13, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
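-; Note: the immediate forms are emitted as vmsgtu.vi with the immediate
-; decremented by one (x >= imm iff x > imm - 1, unsigned). An immediate of 0
-; is always true, so the unmasked form folds to vmset.m and the masked form
-; to vmor.mm of the mask and maskedoff.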
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, -16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 -15,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vi v10, v8, -15, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %1,
- i8 -14,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, -14
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 -13,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vi v10, v8, -13, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %1,
- i8 -12,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, -12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 -11,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vi v10, v8, -11, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %1,
- i8 -10,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, -10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 -9,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vi v10, v8, -9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %1,
- i8 -8,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, -8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 -7,
- i64 %1)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgtu.vi v11, v8, -7, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %1,
- i8 -6,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsgeu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, -6
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 -5,
- i64 %1)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgtu.vi v13, v8, -5, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %1,
- i8 -4,
- <vscale x 32 x i1> %2,
- i64 %3)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, -4
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 -3,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vi v10, v8, -3, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %1,
- i16 -2,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, -2
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 -1,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmor.mm v0, v9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %1,
- i16 0,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 0,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i16_i16_same_mask_maskedoff(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16_same_mask_maskedoff:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %1,
- i16 0,
- <vscale x 2 x i1> %0,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vi v10, v8, 0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %1,
- i16 1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 1
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 2,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgtu.vi v11, v8, 2, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %1,
- i16 3,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 3
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 4,
- i64 %1)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgtu.vi v13, v8, 4, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %1,
- i16 5,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 5
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 6,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vi v10, v8, 6, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %1,
- i32 7,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 7
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 8,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vi v10, v8, 8, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %1,
- i32 9,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 10,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgtu.vi v11, v8, 10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %1,
- i32 11,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 12,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgtu.vi v13, v8, 12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %1,
- i32 13,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 14,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vi v10, v8, 14, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %1,
- i64 15,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 15
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 16,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgtu.vi v11, v8, -16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %1,
- i64 -15,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, -15
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 -14,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgtu.vi v13, v8, -14, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %1,
- i64 -13,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-; Test cases where the mask and maskedoff are the same value.
-define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %0,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %0,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %0,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %0,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %0,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %0,
- i64 %3)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %0,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %0,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %0,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %0,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %0,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %0,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %0,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %0,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %0,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %0,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %0,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmandn.mm v0, v0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %0,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
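+; iXLen below is a stand-in for the target's XLEN-sized integer type: the sed
+; commands above rewrite it to i32 or i64, so this single file is compiled and
+; FileCheck'd for both riscv32 and riscv64 (RV32/RV64 prefixes cover the
+; places where the output differs).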
declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8(
<vscale x 1 x i1> %0,
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8(
<vscale x 2 x i1> %0,
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8(
<vscale x 4 x i1> %0,
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8(
<vscale x 8 x i1> %0,
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8(
<vscale x 16 x i1> %0,
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsgeu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgeu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8(
<vscale x 32 x i1> %0,
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16(
<vscale x 1 x i1> %0,
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16(
<vscale x 2 x i1> %0,
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16(
<vscale x 4 x i1> %0,
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16(
<vscale x 8 x i1> %0,
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16(
<vscale x 16 x i1> %0,
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32(
<vscale x 1 x i1> %0,
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32(
<vscale x 2 x i1> %0,
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32(
<vscale x 4 x i1> %0,
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32(
<vscale x 8 x i1> %0,
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsgeu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgeu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
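; For i64 scalar operands the expected output diverges by target, so the
; shared CHECK prefix is replaced with separate RV32/RV64 prefixes below.
; On RV32 the 64-bit scalar does not fit in a GPR: it is spilled to the
; stack and splatted with a stride-zero vlse64.v, then compared with
; vmsleu.vv. On RV64 the scalar fits in a register, so vmsgeu is expanded
; to vmsltu.vx followed by vmnot.m (x >=u y is the complement of x <u y).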
declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsgeu_vx_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vmsleu.vv v0, v9, v8
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsgeu_vx_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vmsltu.vx v8, v8, a0
+; RV64-NEXT: vmnot.m v0, v8
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v11, (a0), zero
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vv v10, v11, v8, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
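; note: in the masked RV64 expansion below, vmsltu.vx computes x <u y under
; the mask (inactive lanes keep the maskedoff value), and the result is then
; vmxor'd with the mask, which flips exactly the active lanes to x >=u y
; while leaving inactive lanes untouched (maskedoff xor 0 = maskedoff).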
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v11, (a0), zero
+; RV32-NEXT: vmv1r.v v10, v0
+; RV32-NEXT: vmv1r.v v0, v9
+; RV32-NEXT: vmsleu.vv v10, v11, v8, v0.t
+; RV32-NEXT: vmv.v.v v0, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v10, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vmsltu.vx v10, v8, a0, v0.t
+; RV64-NEXT: vmxor.mm v0, v10, v9
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vmsleu.vv v0, v10, v8
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsgeu_vx_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vmsleu.vv v0, v10, v8
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsgeu_vx_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vmsltu.vx v10, v8, a0
+; RV64-NEXT: vmnot.m v0, v10
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsleu.vv v11, v12, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmv1r.v v11, v0
+; RV32-NEXT: vmv1r.v v0, v10
+; RV32-NEXT: vmsleu.vv v11, v12, v8, v0.t
+; RV32-NEXT: vmv1r.v v0, v11
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v11, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vmsltu.vx v11, v8, a0, v0.t
+; RV64-NEXT: vmxor.mm v0, v11, v10
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmsleu.vv v0, v12, v8
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsgeu_vx_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmsleu.vv v0, v12, v8
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsgeu_vx_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vmsltu.vx v12, v8, a0
+; RV64-NEXT: vmnot.m v0, v12
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsleu.vv v13, v16, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vmv1r.v v13, v0
+; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: vmsleu.vv v13, v16, v8, v0.t
+; RV32-NEXT: vmv1r.v v0, v13
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v13, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vmsltu.vx v13, v8, a0, v0.t
+; RV64-NEXT: vmxor.mm v0, v13, v12
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
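; The _vi tests below exercise the immediate form. Since RVV has no vmsgeu
; instruction, vmsgeu with an immediate is presumably lowered as vmsgtu.vi
; with the immediate decremented by one (x >=u imm is x >u imm-1); an
; immediate of 0 makes the unsigned compare trivially true, which is why the
; same-mask/maskedoff variant with 0 below folds away to a plain ret.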
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 -15,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i8> %1,
i8 -14,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 -13,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i8> %1,
i8 -12,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 -11,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i8> %1,
i8 -10,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 -9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 8 x i8> %1,
i8 -8,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 -7,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 16 x i8> %1,
i8 -6,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmsgeu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgeu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 -5,
- i32 %1)
+ iXLen %1)
ret <vscale x 32 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 32 x i8> %1,
i8 -4,
<vscale x 32 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 32 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 -3,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i16> %1,
i16 -2,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 -1,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
<vscale x 2 x i16> %1,
i16 0,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i16_i16_same_mask_maskedoff(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i16_i16_same_mask_maskedoff(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16_same_mask_maskedoff:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ret
<vscale x 2 x i16> %1,
i16 0,
<vscale x 2 x i1> %0,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 0,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i16> %1,
i16 1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 2,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 8 x i16> %1,
i16 3,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 4,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 16 x i16> %1,
i16 5,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 6,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i32> %1,
i32 7,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 8,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 10,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 4 x i32> %1,
i32 11,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 12,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 8 x i32> %1,
i32 13,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 14,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i64> %1,
i64 15,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 16,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 2 x i64> %1,
i64 -15,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 -14,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 4 x i64> %1,
i64 -13,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
; Test cases where the mask and maskedoff are the same value.
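; Because the maskedoff value equals the mask, inactive lanes must come out
; as the mask bit itself (0), which lets the expansion use a single
; vmandn.mm: mask & ~(x <u y) yields x >=u y on active lanes and 0 on
; inactive ones, with no extra register copy needed to preserve maskedoff.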
-define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 32 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vmsleu.vv v0, v9, v8, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vmsleu.vv v0, v9, v8, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vmsltu.vx v8, v8, a0
+; RV64-NEXT: vmandn.mm v0, v0, v8
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vmsleu.vv v10, v12, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmv1r.v v10, v0
+; RV32-NEXT: vmsleu.vv v10, v12, v8, v0.t
+; RV32-NEXT: vmv1r.v v0, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vmsltu.vx v10, v8, a0
+; RV64-NEXT: vmandn.mm v0, v0, v10
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: vmsleu.vv v12, v16, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vmv1r.v v12, v0
+; RV32-NEXT: vmsleu.vv v12, v16, v8, v0.t
+; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vmsltu.vx v12, v8, a0
+; RV64-NEXT: vmandn.mm v0, v0, v12
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %0,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
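; note: the remainder of the diff deletes a standalone riscv64 vmsgt test
; file in full; its coverage presumably moves into a unified test that uses
; iXLen for the VL operand, matching the conversion above.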
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8(
- <vscale x 1 x i1>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmslt.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i8> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8(
- <vscale x 2 x i1>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmslt.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8(
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i8> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8(
- <vscale x 4 x i1>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmslt.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8(
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i8> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8(
- <vscale x 8 x i1>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8(
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i8> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v10, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8(
- <vscale x 16 x i1>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- i64 %4)
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i8> %3,
- <vscale x 16 x i1> %mask,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgt_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v12, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8(
- <vscale x 32 x i1>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- i64 %4)
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i8> %3,
- <vscale x 32 x i1> %mask,
- i64 %4)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16(
- <vscale x 1 x i1>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmslt.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16(
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i16> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16(
- <vscale x 2 x i1>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmslt.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16(
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i16> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16(
- <vscale x 4 x i1>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16(
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i16> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v10, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16(
- <vscale x 8 x i1>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i16> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v12, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16(
- <vscale x 16 x i1>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- i64 %4)
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i16> %3,
- <vscale x 16 x i1> %mask,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32(
- <vscale x 1 x i1>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmslt.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32(
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i32> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32(
- <vscale x 2 x i1>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32(
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i32> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v10, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32(
- <vscale x 4 x i1>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i32> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v12, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32(
- <vscale x 8 x i1>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i32> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64(
- <vscale x 1 x i1>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64(
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i64> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v10, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64(
- <vscale x 2 x i1>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i64> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v12, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64(
- <vscale x 4 x i1>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i64> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
- <vscale x 1 x i8>,
- i8,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vmsgt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
- <vscale x 1 x i1>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
- <vscale x 2 x i8>,
- i8,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vmsgt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
- <vscale x 2 x i1>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
- <vscale x 4 x i8>,
- i8,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vmsgt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
- <vscale x 4 x i1>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
- <vscale x 8 x i8>,
- i8,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vmsgt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
- <vscale x 8 x i1>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
- <vscale x 16 x i8>,
- i8,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vmsgt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
- <vscale x 16 x i1>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgt.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
- <vscale x 32 x i8>,
- i8,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgt_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vmsgt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
- <vscale x 32 x i1>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgt.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
- <vscale x 1 x i16>,
- i16,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vmsgt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
- <vscale x 1 x i1>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
- <vscale x 2 x i16>,
- i16,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vmsgt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
- <vscale x 2 x i1>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
- <vscale x 4 x i16>,
- i16,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vmsgt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
- <vscale x 4 x i1>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
- <vscale x 8 x i16>,
- i16,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vmsgt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
- <vscale x 8 x i1>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgt.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
- <vscale x 16 x i16>,
- i16,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vmsgt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
- <vscale x 16 x i1>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgt.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
- <vscale x 1 x i32>,
- i32,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vmsgt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
- <vscale x 1 x i1>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
- <vscale x 2 x i32>,
- i32,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vmsgt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
- <vscale x 2 x i1>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
- <vscale x 4 x i32>,
- i32,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vmsgt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
- <vscale x 4 x i1>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgt.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
- <vscale x 8 x i32>,
- i32,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vmsgt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
- <vscale x 8 x i1>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgt.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64(
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vmsgt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
- <vscale x 1 x i1>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64(
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vmsgt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
- <vscale x 2 x i1>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgt.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64(
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vmsgt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
- <vscale x 4 x i1>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgt.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %1,
- i8 9,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %1,
- i8 9,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %1,
- i8 9,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %1,
- i8 9,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgt.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %1,
- i8 9,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsgt_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgt.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %1,
- i8 9,
- <vscale x 32 x i1> %2,
- i64 %3)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %1,
- i16 9,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %1,
- i16 9,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %1,
- i16 9,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgt.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %1,
- i16 9,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgt.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %1,
- i16 9,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %1,
- i32 9,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %1,
- i32 9,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgt.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %1,
- i32 9,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgt.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %1,
- i32 9,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %1,
- i64 9,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgt.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %1,
- i64 9,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgt.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %1,
- i64 9,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
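+; Note that iXLen is not a real LLVM type: the sed invocations in the RUN
+; lines above rewrite it to i32 or i64 before llc ever parses the file, so a
+; single source covers both RV32 and RV64. RVV provides no vmsgt.vv encoding,
+; so the vector-vector tests below expect vmslt.vv with the operands swapped;
+; only the vector-scalar and vector-immediate forms (vmsgt.vx, vmsgt.vi) map
+; to native instructions.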
declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8(
<vscale x 1 x i1> %0,
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8(
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8(
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8(
<vscale x 2 x i1> %0,
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8(
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8(
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8(
<vscale x 4 x i1> %0,
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8(
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8(
<vscale x 8 x i1> %0,
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgt_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8(
<vscale x 16 x i1> %0,
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsgt_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgt_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8(
<vscale x 32 x i1> %0,
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16(
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16(
<vscale x 1 x i1> %0,
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16(
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16(
<vscale x 2 x i1> %0,
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16(
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16(
<vscale x 4 x i1> %0,
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16(
<vscale x 8 x i1> %0,
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgt_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16(
<vscale x 16 x i1> %0,
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32(
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32(
<vscale x 1 x i1> %0,
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32(
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32(
<vscale x 2 x i1> %0,
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32(
<vscale x 4 x i1> %0,
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32(
<vscale x 8 x i1> %0,
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64(
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
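+; In the vx tests the scalar operand arrives in a0 and the AVL in a1, so the
+; vsetvli here takes a1; in the vv tests above, a0 carries the AVL.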
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgt_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsgt_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgt_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgt_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vmslt.vv v0, v9, v8
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
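+; On RV32 an i64 scalar occupies a GPR pair, so there is no single-register
+; vmsgt.vx form: the value is stored to the stack, splatted with a
+; zero-strided vlse64.v, and compared as vmslt.vv with the operands swapped.
+; RV64 holds the scalar in one GPR and uses vmsgt.vx directly, hence the
+; separate RV32/RV64 checks for the i64 cases below.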
+define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsgt_vx_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vmslt.vv v0, v9, v8
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsgt_vx_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vmsgt.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v11, (a0), zero
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vv v10, v11, v8, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v11, (a0), zero
+; RV32-NEXT: vmv1r.v v10, v0
+; RV32-NEXT: vmv1r.v v0, v9
+; RV32-NEXT: vmslt.vv v10, v11, v8, v0.t
+; RV32-NEXT: vmv.v.v v0, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v10, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vmsgt.vx v10, v8, a0, v0.t
+; RV64-NEXT: vmv.v.v v0, v10
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vmslt.vv v0, v10, v8
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsgt_vx_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vmslt.vv v0, v10, v8
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsgt_vx_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vmsgt.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmslt.vv v11, v12, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmv1r.v v11, v0
+; RV32-NEXT: vmv1r.v v0, v10
+; RV32-NEXT: vmslt.vv v11, v12, v8, v0.t
+; RV32-NEXT: vmv1r.v v0, v11
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v11, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vmsgt.vx v11, v8, a0, v0.t
+; RV64-NEXT: vmv1r.v v0, v11
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmslt.vv v0, v12, v8
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsgt_vx_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmslt.vv v0, v12, v8
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsgt_vx_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vmsgt.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmslt.vv v13, v16, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vmv1r.v v13, v0
+; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: vmslt.vv v13, v16, v8, v0.t
+; RV32-NEXT: vmv1r.v v0, v13
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v13, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vmsgt.vx v13, v8, a0, v0.t
+; RV64-NEXT: vmv1r.v v0, v13
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
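+; The vi forms below encode the 5-bit immediate 9 directly in vmsgt.vi, so
+; codegen does not depend on XLEN and these tests share the CHECK prefix.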
-define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i8> %1,
i8 9,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i8> %1,
i8 9,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i8> %1,
i8 9,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 8 x i8> %1,
i8 9,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsgt_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 16 x i8> %1,
i8 9,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmsgt_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgt_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 32 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 32 x i8> %1,
i8 9,
<vscale x 32 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 32 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i16> %1,
i16 9,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i16> %1,
i16 9,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i16> %1,
i16 9,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 8 x i16> %1,
i16 9,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsgt_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 16 x i16> %1,
i16 9,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i32> %1,
i32 9,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 4 x i32> %1,
i32 9,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 8 x i32> %1,
i32 9,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
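; Note: the iXLen rewrites above assume the corresponding intrinsic
; declarations also take an iXLen VL operand, with iXLen materialized by the
; test harness as i32 on riscv32 and i64 on riscv64. A minimal sketch of one
; such declaration, using the nxv1i8 shape from the first test above (the
; remaining element-type/LMUL shapes would follow the same pattern):
declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  iXLen);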
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8(
- <vscale x 1 x i1>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsltu.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i8> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8(
- <vscale x 2 x i1>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsltu.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i8> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8(
- <vscale x 4 x i1>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsltu.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i8> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8(
- <vscale x 8 x i1>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i8> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v10, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8(
- <vscale x 16 x i1>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsltu.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- i64 %4)
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i8> %3,
- <vscale x 16 x i1> %mask,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgtu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v12, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8(
- <vscale x 32 x i1>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsltu.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- i64 %4)
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i8> %3,
- <vscale x 32 x i1> %mask,
- i64 %4)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16(
- <vscale x 1 x i1>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsltu.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i16> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16(
- <vscale x 2 x i1>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsltu.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i16> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16(
- <vscale x 4 x i1>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i16> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v10, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16(
- <vscale x 8 x i1>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsltu.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i16> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v12, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16(
- <vscale x 16 x i1>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsltu.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- i64 %4)
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i16> %3,
- <vscale x 16 x i1> %mask,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32(
- <vscale x 1 x i1>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsltu.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i32> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32(
- <vscale x 2 x i1>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i32> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v10, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32(
- <vscale x 4 x i1>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsltu.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i32> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v12, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32(
- <vscale x 8 x i1>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsltu.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i32> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64(
- <vscale x 1 x i1>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v8, v9, v8
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i64> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v10, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64(
- <vscale x 2 x i1>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsltu.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i64> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v12, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64(
- <vscale x 4 x i1>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsltu.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i64> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
- <vscale x 1 x i8>,
- i8,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vmsgtu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
- <vscale x 1 x i1>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
- <vscale x 2 x i8>,
- i8,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vmsgtu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
- <vscale x 2 x i1>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
- <vscale x 4 x i8>,
- i8,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vmsgtu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
- <vscale x 4 x i1>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
- <vscale x 8 x i8>,
- i8,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vmsgtu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
- <vscale x 8 x i1>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
- <vscale x 16 x i8>,
- i8,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vmsgtu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
- <vscale x 16 x i1>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgtu.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
- <vscale x 32 x i8>,
- i8,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgtu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vmsgtu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
- <vscale x 32 x i1>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgtu.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
- <vscale x 1 x i16>,
- i16,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vmsgtu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
- <vscale x 1 x i1>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
- <vscale x 2 x i16>,
- i16,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vmsgtu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
- <vscale x 2 x i1>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
- <vscale x 4 x i16>,
- i16,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vmsgtu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
- <vscale x 4 x i1>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
- <vscale x 8 x i16>,
- i16,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vmsgtu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
- <vscale x 8 x i1>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgtu.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
- <vscale x 16 x i16>,
- i16,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vmsgtu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
- <vscale x 16 x i1>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgtu.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
- <vscale x 1 x i32>,
- i32,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vmsgtu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
- <vscale x 1 x i1>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
- <vscale x 2 x i32>,
- i32,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vmsgtu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
- <vscale x 2 x i1>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
- <vscale x 4 x i32>,
- i32,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vmsgtu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
- <vscale x 4 x i1>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgtu.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
- <vscale x 8 x i32>,
- i32,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vmsgtu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
- <vscale x 8 x i1>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgtu.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vmsgtu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
- <vscale x 1 x i1>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vmsgtu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
- <vscale x 2 x i1>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgtu.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vmsgtu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
- <vscale x 4 x i1>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgtu.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %1,
- i8 9,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %1,
- i8 9,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %1,
- i8 9,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %1,
- i8 9,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %1,
- i8 9,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsgtu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgtu.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %1,
- i8 9,
- <vscale x 32 x i1> %2,
- i64 %3)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %1,
- i16 9,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %1,
- i16 9,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %1,
- i16 9,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %1,
- i16 9,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgtu.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %1,
- i16 9,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %1,
- i32 9,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %1,
- i32 9,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %1,
- i32 9,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgtu.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %1,
- i32 9,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %1,
- i64 9,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %1,
- i64 9,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmsgtu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsgtu.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %1,
- i64 9,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
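+; The RUN lines above pipe this file through sed to rewrite the iXLen
+; placeholder to the target's native XLEN type (i32 on riscv32, i64 on
+; riscv64), so a single copy of the tests drives both targets. Assembly
+; common to both is matched with the shared CHECK prefix; the RV32/RV64
+; prefixes are available for any output that diverges between the two.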
declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8(
<vscale x 1 x i1> %0,
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
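+; In each masked variant, an unmasked vmsgtu first materializes a mask,
+; which is then used as the v0 mask operand of the .mask intrinsic; the
+; leading <vscale x 1 x i1> argument is the merge value kept in inactive
+; positions under the mask-undisturbed ("mu") policy set by the vsetvli.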
declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8(
<vscale x 2 x i1> %0,
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8(
<vscale x 4 x i1> %0,
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8(
<vscale x 8 x i1> %0,
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8(
<vscale x 16 x i1> %0,
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsgtu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgtu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8(
<vscale x 32 x i1> %0,
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16(
<vscale x 1 x i1> %0,
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16(
<vscale x 2 x i1> %0,
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16(
<vscale x 4 x i1> %0,
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16(
<vscale x 8 x i1> %0,
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16(
<vscale x 16 x i1> %0,
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32(
<vscale x 1 x i1> %0,
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32(
<vscale x 2 x i1> %0,
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32(
<vscale x 4 x i1> %0,
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32(
<vscale x 8 x i1> %0,
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
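+; The vector-scalar (vx) variants compare every element against a scalar
+; held in a GPR, so the scalar arrives in a0 and the vector length moves
+; to a1 in the vsetvli that precedes each compare.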
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
<vscale x 2 x i1>,
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
<vscale x 4 x i1>,
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
<vscale x 8 x i1>,
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
<vscale x 16 x i1>,
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsgtu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgtu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
<vscale x 32 x i1>,
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
<vscale x 1 x i1>,
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
<vscale x 2 x i1>,
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
<vscale x 4 x i1>,
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
<vscale x 8 x i1>,
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
<vscale x 16 x i1>,
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
<vscale x 1 x i1>,
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
<vscale x 2 x i1>,
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
<vscale x 4 x i1>,
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
<vscale x 8 x i1>,
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsgtu_vx_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vmsltu.vv v0, v9, v8
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsgtu_vx_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vmsgtu.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
<vscale x 1 x i1>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v11, (a0), zero
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vv v10, v11, v8, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v11, (a0), zero
+; RV32-NEXT: vmv1r.v v10, v0
+; RV32-NEXT: vmv1r.v v0, v9
+; RV32-NEXT: vmsltu.vv v10, v11, v8, v0.t
+; RV32-NEXT: vmv.v.v v0, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v10, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vmsgtu.vx v10, v8, a0, v0.t
+; RV64-NEXT: vmv.v.v v0, v10
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vmsltu.vv v0, v10, v8
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsgtu_vx_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vmsltu.vv v0, v10, v8
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsgtu_vx_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vmsgtu.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
<vscale x 2 x i1>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsltu.vv v11, v12, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmv1r.v v11, v0
+; RV32-NEXT: vmv1r.v v0, v10
+; RV32-NEXT: vmsltu.vv v11, v12, v8, v0.t
+; RV32-NEXT: vmv1r.v v0, v11
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v11, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vmsgtu.vx v11, v8, a0, v0.t
+; RV64-NEXT: vmv1r.v v0, v11
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmsltu.vv v0, v12, v8
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsgtu_vx_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmsltu.vv v0, v12, v8
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsgtu_vx_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vmsgtu.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
<vscale x 4 x i1>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsltu.vv v13, v16, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vmv1r.v v13, v0
+; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: vmsltu.vv v13, v16, v8, v0.t
+; RV32-NEXT: vmv1r.v v0, v13
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v13, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vmsgtu.vx v13, v8, a0, v0.t
+; RV64-NEXT: vmv1r.v v0, v13
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i8> %1,
i8 9,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i8> %1,
i8 9,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i8> %1,
i8 9,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 8 x i8> %1,
i8 9,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 16 x i8> %1,
i8 9,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmsgtu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgtu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 32 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 32 x i8> %1,
i8 9,
<vscale x 32 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 32 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i16> %1,
i16 9,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i16> %1,
i16 9,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i16> %1,
i16 9,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 8 x i16> %1,
i16 9,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 16 x i16> %1,
i16 9,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i32> %1,
i32 9,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 4 x i32> %1,
i32 9,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 8 x i32> %1,
i32 9,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8(
- <vscale x 1 x i1>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- i32 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i8> %3,
- <vscale x 1 x i1> %mask,
- i32 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8(
- <vscale x 2 x i1>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- i32 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i8> %3,
- <vscale x 2 x i1> %mask,
- i32 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8(
- <vscale x 4 x i1>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- i32 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i8> %3,
- <vscale x 4 x i1> %mask,
- i32 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i32 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8(
- <vscale x 8 x i1>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- i32 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i8> %3,
- <vscale x 8 x i1> %mask,
- i32 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsle_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i32 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8(
- <vscale x 16 x i1>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsle.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- i32 %4)
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i8> %3,
- <vscale x 16 x i1> %mask,
- i32 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i32);
-
-define <vscale x 32 x i1> @intrinsic_vmsle_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i32 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8(
- <vscale x 32 x i1>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i32);
-
-define <vscale x 32 x i1> @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsle.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- i32 %4)
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i8> %3,
- <vscale x 32 x i1> %mask,
- i32 %4)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16(
- <vscale x 1 x i1>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- i32 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i16> %3,
- <vscale x 1 x i1> %mask,
- i32 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16(
- <vscale x 2 x i1>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- i32 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i16> %3,
- <vscale x 2 x i1> %mask,
- i32 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16(
- <vscale x 4 x i1>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- i32 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i16> %3,
- <vscale x 4 x i1> %mask,
- i32 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i32 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16(
- <vscale x 8 x i1>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsle.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- i32 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i16> %3,
- <vscale x 8 x i1> %mask,
- i32 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsle_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i32 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16(
- <vscale x 16 x i1>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsle.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- i32 %4)
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i16> %3,
- <vscale x 16 x i1> %mask,
- i32 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32(
- <vscale x 1 x i1>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- i32 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i32> %3,
- <vscale x 1 x i1> %mask,
- i32 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32(
- <vscale x 2 x i1>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- i32 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i32> %3,
- <vscale x 2 x i1> %mask,
- i32 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32(
- <vscale x 4 x i1>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsle.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- i32 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i32> %3,
- <vscale x 4 x i1> %mask,
- i32 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32(
- <vscale x 8 x i1>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsle.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- i32 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i32> %3,
- <vscale x 8 x i1> %mask,
- i32 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64(
- <vscale x 1 x i1>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- i32 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i64> %3,
- <vscale x 1 x i1> %mask,
- i32 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64(
- <vscale x 2 x i1>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsle.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- i32 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i64> %3,
- <vscale x 2 x i1> %mask,
- i32 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmsle.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64(
- <vscale x 4 x i1>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsle.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- i32 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i64> %3,
- <vscale x 4 x i1> %mask,
- i32 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
- <vscale x 1 x i8>,
- i8,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vmsle.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
- <vscale x 1 x i1>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
- <vscale x 2 x i8>,
- i8,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vmsle.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
- <vscale x 2 x i1>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
- <vscale x 4 x i8>,
- i8,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vmsle.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
- <vscale x 4 x i1>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
- <vscale x 8 x i8>,
- i8,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vmsle.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
- <vscale x 8 x i1>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
- <vscale x 16 x i8>,
- i8,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsle_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vmsle.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
- <vscale x 16 x i1>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsle.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
- <vscale x 32 x i8>,
- i8,
- i32);
-
-define <vscale x 32 x i1> @intrinsic_vmsle_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vmsle.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
- <vscale x 32 x i1>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i32);
-
-define <vscale x 32 x i1> @intrinsic_vmsle_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsle.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i32 %4)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
- <vscale x 1 x i16>,
- i16,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vmsle.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
- <vscale x 1 x i1>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
- <vscale x 2 x i16>,
- i16,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vmsle.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
- <vscale x 2 x i1>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
- <vscale x 4 x i16>,
- i16,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vmsle.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
- <vscale x 4 x i1>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
- <vscale x 8 x i16>,
- i16,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vmsle.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
- <vscale x 8 x i1>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsle.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
- <vscale x 16 x i16>,
- i16,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsle_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vmsle.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
- <vscale x 16 x i1>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsle.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
- <vscale x 1 x i32>,
- i32,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vmsle.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
- <vscale x 1 x i1>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
- <vscale x 2 x i32>,
- i32,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vmsle.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
- <vscale x 2 x i1>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
- <vscale x 4 x i32>,
- i32,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vmsle.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
- <vscale x 4 x i1>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsle.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
- <vscale x 8 x i32>,
- i32,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vmsle.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
- <vscale x 8 x i1>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsle.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
- <vscale x 1 x i64>,
- i64,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vmsle.vv v0, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
- <vscale x 1 x i1>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v11, (a0), zero
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vv v10, v8, v11, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
- <vscale x 2 x i64>,
- i64,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vmsle.vv v0, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
- <vscale x 2 x i1>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsle.vv v11, v8, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
- <vscale x 4 x i64>,
- i64,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmsle.vv v0, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
- <vscale x 4 x i1>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsle.vv v13, v8, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %1,
- i8 9,
- <vscale x 1 x i1> %2,
- i32 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %1,
- i8 9,
- <vscale x 2 x i1> %2,
- i32 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %1,
- i8 9,
- <vscale x 4 x i1> %2,
- i32 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %1,
- i8 9,
- <vscale x 8 x i1> %2,
- i32 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsle_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsle.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %1,
- i8 9,
- <vscale x 16 x i1> %2,
- i32 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsle_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsle_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsle.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %1,
- i8 9,
- <vscale x 32 x i1> %2,
- i32 %3)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %1,
- i16 9,
- <vscale x 1 x i1> %2,
- i32 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %1,
- i16 9,
- <vscale x 2 x i1> %2,
- i32 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %1,
- i16 9,
- <vscale x 4 x i1> %2,
- i32 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsle.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %1,
- i16 9,
- <vscale x 8 x i1> %2,
- i32 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsle_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsle.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %1,
- i16 9,
- <vscale x 16 x i1> %2,
- i32 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %1,
- i32 9,
- <vscale x 1 x i1> %2,
- i32 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %1,
- i32 9,
- <vscale x 2 x i1> %2,
- i32 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsle.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %1,
- i32 9,
- <vscale x 4 x i1> %2,
- i32 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsle.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %1,
- i32 9,
- <vscale x 8 x i1> %2,
- i32 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 9,
- i32 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %1,
- i64 9,
- <vscale x 1 x i1> %2,
- i32 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 9,
- i32 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsle.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %1,
- i64 9,
- <vscale x 2 x i1> %2,
- i32 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 9,
- i32 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsle.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %1,
- i64 9,
- <vscale x 4 x i1> %2,
- i32 %3)
-
- ret <vscale x 4 x i1> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
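+; NOTE: The test body below uses the placeholder type iXLen for VL and
+; other XLEN-sized operands; each RUN line rewrites it to i32 or i64 with
+; sed, so a single body is shared between the riscv32 and riscv64
+; configurations, with the RV32/RV64 check prefixes covering output that
+; differs between the two targets.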
declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8(
<vscale x 1 x i1> %0,
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8(
<vscale x 2 x i1> %0,
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8(
<vscale x 4 x i1> %0,
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8(
<vscale x 8 x i1> %0,
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i64);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsle_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i64);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8(
<vscale x 16 x i1> %0,
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i64);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsle_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsle_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i64);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8(
<vscale x 32 x i1> %0,
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16(
<vscale x 1 x i1> %0,
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16(
<vscale x 2 x i1> %0,
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16(
<vscale x 4 x i1> %0,
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16(
<vscale x 8 x i1> %0,
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i64);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsle_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i64);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16(
<vscale x 16 x i1> %0,
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32(
<vscale x 1 x i1> %0,
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32(
<vscale x 2 x i1> %0,
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32(
<vscale x 4 x i1> %0,
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32(
<vscale x 8 x i1> %0,
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsle_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i64);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsle_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsle_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i64);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsle_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsle_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsle_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i64);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vmsle.vx v0, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
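+; NOTE: On riscv32 an i64 scalar operand does not fit in a single GPR, so
+; the cases below receive it in a register pair, spill it to the stack,
+; splat it with a zero-strided load (vlse64.v), and compare with vmsle.vv;
+; riscv64 can keep the scalar form vmsle.vx.
+;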
+define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsle_vx_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vmsle.vv v0, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsle_vx_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vmsle.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
+ iXLen);
+
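+; NOTE: In the masked variants, v0 is the architectural mask register: the
+; merge operand arriving in v0 is copied aside (vmv1r.v), the mask operand
+; is moved into v0 for the v0.t compare, and the result is copied back
+; into v0 to be returned.
+;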
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v11, (a0), zero
+; RV32-NEXT: vmv1r.v v10, v0
+; RV32-NEXT: vmv1r.v v0, v9
+; RV32-NEXT: vmsle.vv v10, v8, v11, v0.t
+; RV32-NEXT: vmv.v.v v0, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v10, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vmsle.vx v10, v8, a0, v0.t
+; RV64-NEXT: vmv.v.v v0, v10
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vmsle.vx v0, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsle_vx_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vmsle.vv v0, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsle_vx_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vmsle.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsle.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmv1r.v v11, v0
+; RV32-NEXT: vmv1r.v v0, v10
+; RV32-NEXT: vmsle.vv v11, v8, v12, v0.t
+; RV32-NEXT: vmv1r.v v0, v11
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v11, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vmsle.vx v11, v8, a0, v0.t
+; RV64-NEXT: vmv1r.v v0, v11
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vmsle.vx v0, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsle_vx_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmsle.vv v0, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsle_vx_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vmsle.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsle.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vmv1r.v v13, v0
+; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: vmsle.vv v13, v8, v16, v0.t
+; RV32-NEXT: vmv1r.v v0, v13
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v13, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vmsle.vx v13, v8, a0, v0.t
+; RV64-NEXT: vmv1r.v v0, v13
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i8> %1,
i8 9,
<vscale x 1 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i8> %1,
i8 9,
<vscale x 2 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i8> %1,
i8 9,
<vscale x 4 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 8 x i8> %1,
i8 9,
<vscale x 8 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsle_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 16 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 16 x i8> %1,
i8 9,
<vscale x 16 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmsle_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsle_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 32 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmsle_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsle_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 32 x i8> %1,
i8 9,
<vscale x 32 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 32 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i16> %1,
i16 9,
<vscale x 1 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i16> %1,
i16 9,
<vscale x 2 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i16> %1,
i16 9,
<vscale x 4 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 8 x i16> %1,
i16 9,
<vscale x 8 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsle_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 16 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 16 x i16> %1,
i16 9,
<vscale x 16 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i32> %1,
i32 9,
<vscale x 1 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 4 x i32> %1,
i32 9,
<vscale x 4 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 8 x i32> %1,
i32 9,
<vscale x 8 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8(
- <vscale x 1 x i1>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsleu.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i8> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8(
- <vscale x 2 x i1>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsleu.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i8> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8(
- <vscale x 4 x i1>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsleu.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i8> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8(
- <vscale x 8 x i1>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i8> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8(
- <vscale x 16 x i1>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsleu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- i64 %4)
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i8> %3,
- <vscale x 16 x i1> %mask,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsleu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8(
- <vscale x 32 x i1>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsleu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- i64 %4)
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i8> %3,
- <vscale x 32 x i1> %mask,
- i64 %4)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16(
- <vscale x 1 x i1>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsleu.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i16> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16(
- <vscale x 2 x i1>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsleu.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i16> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16(
- <vscale x 4 x i1>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i16> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16(
- <vscale x 8 x i1>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsleu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i16> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16(
- <vscale x 16 x i1>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsleu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- i64 %4)
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i16> %3,
- <vscale x 16 x i1> %mask,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32(
- <vscale x 1 x i1>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsleu.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i32> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32(
- <vscale x 2 x i1>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i32> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32(
- <vscale x 4 x i1>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsleu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i32> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32(
- <vscale x 8 x i1>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsleu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i32> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64(
- <vscale x 1 x i1>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i64> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64(
- <vscale x 2 x i1>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsleu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i64> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmsleu.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64(
- <vscale x 4 x i1>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsleu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i64> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
- <vscale x 1 x i8>,
- i8,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vmsleu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
- <vscale x 1 x i1>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
- <vscale x 2 x i8>,
- i8,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vmsleu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
- <vscale x 2 x i1>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
- <vscale x 4 x i8>,
- i8,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vmsleu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
- <vscale x 4 x i1>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
- <vscale x 8 x i8>,
- i8,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vmsleu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
- <vscale x 8 x i1>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
- <vscale x 16 x i8>,
- i8,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vmsleu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
- <vscale x 16 x i1>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsleu.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
- <vscale x 32 x i8>,
- i8,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsleu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vmsleu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
- <vscale x 32 x i1>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsleu.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
- <vscale x 1 x i16>,
- i16,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vmsleu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
- <vscale x 1 x i1>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
- <vscale x 2 x i16>,
- i16,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vmsleu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
- <vscale x 2 x i1>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
- <vscale x 4 x i16>,
- i16,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vmsleu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
- <vscale x 4 x i1>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
- <vscale x 8 x i16>,
- i16,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vmsleu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
- <vscale x 8 x i1>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsleu.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
- <vscale x 16 x i16>,
- i16,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vmsleu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
- <vscale x 16 x i1>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsleu.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
- <vscale x 1 x i32>,
- i32,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vmsleu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
- <vscale x 1 x i1>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
- <vscale x 2 x i32>,
- i32,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vmsleu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
- <vscale x 2 x i1>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
- <vscale x 4 x i32>,
- i32,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vmsleu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
- <vscale x 4 x i1>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsleu.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
- <vscale x 8 x i32>,
- i32,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vmsleu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
- <vscale x 8 x i1>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsleu.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vmsleu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
- <vscale x 1 x i1>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vmsleu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
- <vscale x 2 x i1>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsleu.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vmsleu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
- <vscale x 4 x i1>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsleu.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %1,
- i8 9,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %1,
- i8 9,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %1,
- i8 9,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %1,
- i8 9,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsleu.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %1,
- i8 9,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsleu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsleu.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %1,
- i8 9,
- <vscale x 32 x i1> %2,
- i64 %3)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %1,
- i16 9,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %1,
- i16 9,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %1,
- i16 9,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsleu.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %1,
- i16 9,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsleu.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %1,
- i16 9,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %1,
- i32 9,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %1,
- i32 9,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsleu.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %1,
- i32 9,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsleu.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %1,
- i32 9,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %1,
- i64 9,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsleu.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %1,
- i64 9,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsleu.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %1,
- i64 9,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
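+; Note (hand-written, not part of the autogenerated assertions): the RUN
+; lines above rely on a textual placeholder type, iXLen, for the
+; vector-length operand. Each RUN line pipes the file through sed to
+; rewrite iXLen to the target's native integer width (i32 for riscv32,
+; i64 for riscv64), so a single test source drives both backends, with
+; shared CHECK lines and the RV32/RV64 prefixes reserved for any output
+; that differs between the two.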
declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8(
<vscale x 1 x i1> %0,
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
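+; Hand-written note: the mask_vv tests in this file all share the shape
+; shown above. An unmasked vmsleu first computes a mask from %1 and %2,
+; and that result is then passed as the mask operand (v0) of the masked
+; vmsleu call, exercising the vmv1r.v shuffling needed when the comparison
+; result and the governing mask both compete for v0.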
declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8(
<vscale x 2 x i1> %0,
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8(
<vscale x 4 x i1> %0,
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8(
<vscale x 8 x i1> %0,
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsleu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8(
<vscale x 16 x i1> %0,
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsleu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsleu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8(
<vscale x 32 x i1> %0,
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16(
<vscale x 1 x i1> %0,
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16(
<vscale x 2 x i1> %0,
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16(
<vscale x 4 x i1> %0,
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16(
<vscale x 8 x i1> %0,
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsleu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16(
<vscale x 16 x i1> %0,
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32(
<vscale x 1 x i1> %0,
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32(
<vscale x 2 x i1> %0,
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32(
<vscale x 4 x i1> %0,
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32(
<vscale x 8 x i1> %0,
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsleu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsleu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsleu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsleu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsleu_vx_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vmsleu.vv v0, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsleu_vx_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vmsleu.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v11, (a0), zero
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vv v10, v8, v11, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v11, (a0), zero
+; RV32-NEXT: vmv1r.v v10, v0
+; RV32-NEXT: vmv1r.v v0, v9
+; RV32-NEXT: vmsleu.vv v10, v8, v11, v0.t
+; RV32-NEXT: vmv.v.v v0, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v10, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vmsleu.vx v10, v8, a0, v0.t
+; RV64-NEXT: vmv.v.v v0, v10
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vmsleu.vv v0, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsleu_vx_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vmsleu.vv v0, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsleu_vx_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vmsleu.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsleu.vv v11, v8, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmv1r.v v11, v0
+; RV32-NEXT: vmv1r.v v0, v10
+; RV32-NEXT: vmsleu.vv v11, v8, v12, v0.t
+; RV32-NEXT: vmv1r.v v0, v11
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v11, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vmsleu.vx v11, v8, a0, v0.t
+; RV64-NEXT: vmv1r.v v0, v11
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmsleu.vv v0, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsleu_vx_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmsleu.vv v0, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsleu_vx_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vmsleu.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsleu.vv v13, v8, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vmv1r.v v13, v0
+; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: vmsleu.vv v13, v8, v16, v0.t
+; RV32-NEXT: vmv1r.v v0, v13
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v13, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vmsleu.vx v13, v8, a0, v0.t
+; RV64-NEXT: vmv1r.v v0, v13
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i8> %1,
i8 9,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i8> %1,
i8 9,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i8> %1,
i8 9,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 8 x i8> %1,
i8 9,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsleu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 16 x i8> %1,
i8 9,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmsleu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsleu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 32 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 32 x i8> %1,
i8 9,
<vscale x 32 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 32 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i16> %1,
i16 9,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i16> %1,
i16 9,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i16> %1,
i16 9,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 8 x i16> %1,
i16 9,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsleu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 16 x i16> %1,
i16 9,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i32> %1,
i32 9,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 4 x i32> %1,
i32 9,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 8 x i32> %1,
i32 9,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
- <vscale x 1 x i1>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmslt.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i8> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
- <vscale x 2 x i1>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmslt.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i8> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
- <vscale x 4 x i1>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmslt.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i8> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8(
- <vscale x 8 x i1>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i8> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8(
- <vscale x 16 x i1>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmslt.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- i64 %4)
- %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i8> %3,
- <vscale x 16 x i1> %mask,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmslt_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8(
- <vscale x 32 x i1>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmslt.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- i64 %4)
- %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i8> %3,
- <vscale x 32 x i1> %mask,
- i64 %4)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16(
- <vscale x 1 x i1>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmslt.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i16> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16(
- <vscale x 2 x i1>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmslt.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i16> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16(
- <vscale x 4 x i1>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i16> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16(
- <vscale x 8 x i1>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmslt.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i16> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16(
- <vscale x 16 x i1>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmslt.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- i64 %4)
- %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i16> %3,
- <vscale x 16 x i1> %mask,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32(
- <vscale x 1 x i1>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmslt.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i32> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32(
- <vscale x 2 x i1>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i32> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32(
- <vscale x 4 x i1>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmslt.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i32> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32(
- <vscale x 8 x i1>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmslt.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i32> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64(
- <vscale x 1 x i1>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i64> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64(
- <vscale x 2 x i1>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmslt.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i64> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmslt.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64(
- <vscale x 4 x i1>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmslt.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i64> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
- <vscale x 1 x i8>,
- i8,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vmslt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
- <vscale x 1 x i1>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
- <vscale x 2 x i8>,
- i8,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vmslt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
- <vscale x 2 x i1>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
- <vscale x 4 x i8>,
- i8,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vmslt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
- <vscale x 4 x i1>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
- <vscale x 8 x i8>,
- i8,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vmslt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
- <vscale x 8 x i1>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
- <vscale x 16 x i8>,
- i8,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vmslt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
- <vscale x 16 x i1>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
- <vscale x 32 x i8>,
- i8,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmslt_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vmslt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
- <vscale x 32 x i1>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmslt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
- <vscale x 1 x i16>,
- i16,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vmslt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
- <vscale x 1 x i1>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
- <vscale x 2 x i16>,
- i16,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vmslt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
- <vscale x 2 x i1>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
- <vscale x 4 x i16>,
- i16,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vmslt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
- <vscale x 4 x i1>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
- <vscale x 8 x i16>,
- i16,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vmslt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
- <vscale x 8 x i1>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
- <vscale x 16 x i16>,
- i16,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vmslt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
- <vscale x 16 x i1>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
- <vscale x 1 x i32>,
- i32,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vmslt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
- <vscale x 1 x i1>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
- <vscale x 2 x i32>,
- i32,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vmslt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
- <vscale x 2 x i1>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
- <vscale x 4 x i32>,
- i32,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vmslt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
- <vscale x 4 x i1>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
- <vscale x 8 x i32>,
- i32,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vmslt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
- <vscale x 8 x i1>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vmslt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
- <vscale x 1 x i1>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vmslt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
- <vscale x 2 x i1>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vmslt.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
- <vscale x 4 x i1>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, -16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 -15,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vi v10, v8, -15, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %1,
- i8 -14,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, -14
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 -13,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vi v10, v8, -13, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %1,
- i8 -12,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, -12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 -11,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vi v10, v8, -11, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %1,
- i8 -10,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, -10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 -9,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vi v10, v8, -9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %1,
- i8 -8,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, -8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 -7,
- i64 %1)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsle.vi v11, v8, -7, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %1,
- i8 -6,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmslt_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, -6
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 -5,
- i64 %1)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmslt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsle.vi v13, v8, -5, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %1,
- i8 -4,
- <vscale x 32 x i1> %2,
- i64 %3)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, -4
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 -3,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vi v10, v8, -3, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %1,
- i16 -2,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, -2
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 -1,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vx v10, v8, zero, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %1,
- i16 0,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmslt.vx v0, v8, zero
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 0,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vi v10, v8, 0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %1,
- i16 1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 1
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 2,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsle.vi v11, v8, 2, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %1,
- i16 3,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 3
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 4,
- i64 %1)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsle.vi v13, v8, 4, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %1,
- i16 5,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 5
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 6,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vi v10, v8, 6, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %1,
- i32 7,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 7
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 8,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vi v10, v8, 8, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %1,
- i32 9,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 10,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsle.vi v11, v8, 10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %1,
- i32 11,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 12,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsle.vi v13, v8, 12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %1,
- i32 13,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 14,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsle.vi v10, v8, 14, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %1,
- i64 15,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, 15
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 16,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsle.vi v11, v8, -16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %1,
- i64 -15,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, -15
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 -14,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsle.vi v13, v8, -14, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %1,
- i64 -13,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
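+; The tests below use the placeholder type iXLen for the vector-length
+; operand; the sed invocations in the RUN lines rewrite it to i32 or i64 so
+; that a single test file covers both riscv32 and riscv64, with shared CHECK
+; lines and separate RV32/RV64 prefixes where the generated code differs.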
declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
<vscale x 1 x i1> %0,
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
<vscale x 2 x i1> %0,
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
<vscale x 4 x i1> %0,
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8(
<vscale x 8 x i1> %0,
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
%mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8(
<vscale x 16 x i1> %0,
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmslt_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmslt_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
%mask = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8(
<vscale x 32 x i1> %0,
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16(
<vscale x 1 x i1> %0,
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16(
<vscale x 2 x i1> %0,
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16(
<vscale x 4 x i1> %0,
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16(
<vscale x 8 x i1> %0,
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
%mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16(
<vscale x 16 x i1> %0,
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32(
<vscale x 1 x i1> %0,
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32(
<vscale x 2 x i1> %0,
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32(
<vscale x 4 x i1> %0,
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32(
<vscale x 8 x i1> %0,
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmslt_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmslt_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmslt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmslt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vmslt.vv v0, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
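+; Note: an i64 scalar operand does not fit in a single GPR on RV32, so the
+; expected code below stores the two halves to the stack and splats the
+; 64-bit value with a zero-stride vlse64.v before a vector-vector compare,
+; whereas RV64 can use vmslt.vx directly; hence the RV32 and RV64 check
+; prefixes diverge for the i64 vx tests.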
+define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmslt_vx_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vmslt.vv v0, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmslt_vx_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vmslt.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v11, (a0), zero
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmslt.vv v10, v8, v11, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v11, (a0), zero
+; RV32-NEXT: vmv1r.v v10, v0
+; RV32-NEXT: vmv1r.v v0, v9
+; RV32-NEXT: vmslt.vv v10, v8, v11, v0.t
+; RV32-NEXT: vmv.v.v v0, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v10, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vmslt.vx v10, v8, a0, v0.t
+; RV64-NEXT: vmv.v.v v0, v10
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vmslt.vv v0, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmslt_vx_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vmslt.vv v0, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmslt_vx_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vmslt.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmslt.vv v11, v8, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmv1r.v v11, v0
+; RV32-NEXT: vmv1r.v v0, v10
+; RV32-NEXT: vmslt.vv v11, v8, v12, v0.t
+; RV32-NEXT: vmv1r.v v0, v11
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v11, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vmslt.vx v11, v8, a0, v0.t
+; RV64-NEXT: vmv1r.v v0, v11
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmslt.vv v0, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmslt_vx_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmslt.vv v0, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmslt_vx_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vmslt.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmslt.vv v13, v8, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vmv1r.v v13, v0
+; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: vmslt.vv v13, v8, v16, v0.t
+; RV32-NEXT: vmv1r.v v0, v13
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v13, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vmslt.vx v13, v8, a0, v0.t
+; RV64-NEXT: vmv1r.v v0, v13
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 -15,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i8> %1,
i8 -14,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 -13,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i8> %1,
i8 -12,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 -11,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i8> %1,
i8 -10,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 -9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 8 x i8> %1,
i8 -8,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 -7,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 16 x i8> %1,
i8 -6,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmslt_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmslt_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 -5,
- i32 %1)
+ iXLen %1)
ret <vscale x 32 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmslt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmslt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 32 x i8> %1,
i8 -4,
<vscale x 32 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 32 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 -3,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i16> %1,
i16 -2,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 -1,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i16> %1,
i16 0,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 0,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i16> %1,
i16 1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 2,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 8 x i16> %1,
i16 3,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 4,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 16 x i16> %1,
i16 5,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 6,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i32> %1,
i32 7,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 8,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 10,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 4 x i32> %1,
i32 11,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 12,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 8 x i32> %1,
i32 13,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8(
- <vscale x 1 x i1>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsltu.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i8> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8(
- <vscale x 2 x i1>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsltu.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i8> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8(
- <vscale x 4 x i1>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsltu.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i8> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8(
- <vscale x 8 x i1>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i8> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8(
- <vscale x 16 x i1>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsltu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- i64 %4)
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i8> %3,
- <vscale x 16 x i1> %mask,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsltu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8(
- <vscale x 32 x i1>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsltu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- i64 %4)
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i8> %3,
- <vscale x 32 x i1> %mask,
- i64 %4)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16(
- <vscale x 1 x i1>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsltu.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i16> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16(
- <vscale x 2 x i1>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsltu.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i16> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16(
- <vscale x 4 x i1>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i16> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16(
- <vscale x 8 x i1>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsltu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i16> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16(
- <vscale x 16 x i1>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsltu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- i64 %4)
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i16> %3,
- <vscale x 16 x i1> %mask,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32(
- <vscale x 1 x i1>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsltu.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i32> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32(
- <vscale x 2 x i1>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i32> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32(
- <vscale x 4 x i1>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsltu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i32> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32(
- <vscale x 8 x i1>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsltu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- i64 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i32> %3,
- <vscale x 8 x i1> %mask,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64(
- <vscale x 1 x i1>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- i64 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i64> %3,
- <vscale x 1 x i1> %mask,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64(
- <vscale x 2 x i1>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsltu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- i64 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i64> %3,
- <vscale x 2 x i1> %mask,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmsltu.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64(
- <vscale x 4 x i1>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsltu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- i64 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i64> %3,
- <vscale x 4 x i1> %mask,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
- <vscale x 1 x i8>,
- i8,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
- <vscale x 1 x i1>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
- <vscale x 2 x i8>,
- i8,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
- <vscale x 2 x i1>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
- <vscale x 4 x i8>,
- i8,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
- <vscale x 4 x i1>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
- <vscale x 8 x i8>,
- i8,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
- <vscale x 8 x i1>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
- <vscale x 16 x i8>,
- i8,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
- <vscale x 16 x i1>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
- <vscale x 32 x i8>,
- i8,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsltu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
- <vscale x 32 x i1>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
- <vscale x 1 x i16>,
- i16,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
- <vscale x 1 x i1>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
- <vscale x 2 x i16>,
- i16,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
- <vscale x 2 x i1>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
- <vscale x 4 x i16>,
- i16,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
- <vscale x 4 x i1>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
- <vscale x 8 x i16>,
- i16,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
- <vscale x 8 x i1>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
- <vscale x 16 x i16>,
- i16,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
- <vscale x 16 x i1>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
- <vscale x 1 x i32>,
- i32,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
- <vscale x 1 x i1>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
- <vscale x 2 x i32>,
- i32,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
- <vscale x 2 x i1>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
- <vscale x 4 x i32>,
- i32,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
- <vscale x 4 x i1>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
- <vscale x 8 x i32>,
- i32,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
- <vscale x 8 x i1>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
- <vscale x 1 x i1>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
- <vscale x 2 x i1>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
- <vscale x 4 x i1>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, -16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 -15,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vi v10, v8, -15, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %1,
- i8 -14,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, -14
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 -13,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vi v10, v8, -13, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %1,
- i8 -12,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, -12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 -11,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vi v10, v8, -11, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %1,
- i8 -10,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, -10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 -9,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vi v10, v8, -9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %1,
- i8 -8,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, -8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 -7,
- i64 %1)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsleu.vi v11, v8, -7, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %1,
- i8 -6,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsltu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, -6
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 -5,
- i64 %1)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsleu.vi v13, v8, -5, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %1,
- i8 -4,
- <vscale x 32 x i1> %2,
- i64 %3)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, -4
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 -3,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vi v10, v8, -3, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %1,
- i16 -2,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, -2
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 -1,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vx v10, v8, zero, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %1,
- i16 0,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmsltu.vx v0, v8, zero
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 0,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vi v10, v8, 0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %1,
- i16 1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 1
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 2,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsleu.vi v11, v8, 2, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %1,
- i16 3,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 3
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 4,
- i64 %1)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsleu.vi v13, v8, 4, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %1,
- i16 5,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 5
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 6,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vi v10, v8, 6, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %1,
- i32 7,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 7
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 8,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vi v10, v8, 8, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %1,
- i32 9,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 10,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsleu.vi v11, v8, 10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %1,
- i32 11,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 12,
- i64 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsleu.vi v13, v8, 12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %1,
- i32 13,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 14,
- i64 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsleu.vi v10, v8, 14, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %1,
- i64 15,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, 15
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 16,
- i64 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsleu.vi v11, v8, -16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %1,
- i64 -15,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmsleu.vi v0, v8, -15
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 -14,
- i64 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsleu.vi v13, v8, -14, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %1,
- i64 -13,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i1> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
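+; iXLen below is a placeholder for the XLEN-sized scalar type: the sed
+; invocations in the RUN lines rewrite it to i32 for the riscv32 run and to
+; i64 for the riscv64 run, so a single test body covers both targets.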
declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8(
<vscale x 1 x i1> %0,
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8(
<vscale x 2 x i1> %0,
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8(
<vscale x 4 x i1> %0,
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8(
<vscale x 8 x i1> %0,
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsltu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8(
<vscale x 16 x i1> %0,
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsltu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsltu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8(
<vscale x 32 x i1> %0,
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16(
<vscale x 1 x i1> %0,
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16(
<vscale x 2 x i1> %0,
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16(
<vscale x 4 x i1> %0,
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16(
<vscale x 8 x i1> %0,
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsltu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16(
<vscale x 16 x i1> %0,
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32(
<vscale x 1 x i1> %0,
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32(
<vscale x 2 x i1> %0,
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32(
<vscale x 4 x i1> %0,
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32(
<vscale x 8 x i1> %0,
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
- i32 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
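; In the masked vector-scalar tests below, the maskedoff operand %0 is passed
; in v0, the same register the mask %3 must occupy for a masked compare; the
; generated code therefore saves v0 to a scratch register (vmv1r.v v10, v0),
; installs %3 into v0, compares into the scratch, and moves the result back.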
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
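; As LMUL grows, the register assignments in the masked tests shift to stay
; clear of the widened source group: with an m2 source in v8-v9 the mask %3
; arrives in v10 and v0 is saved to v11; with an m4 source in v8-v11 those
; become v12 and v13.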
declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsltu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsltu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsltu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsltu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
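; The i64 vector-scalar tests are the one place riscv32 and riscv64 codegen
; diverge, so the checks split into RV32/RV64 prefixes: riscv32 has no 64-bit
; GPR, so the scalar's two halves are stored to the stack and splat into a
; vector by a zero-stride vlse64.v, turning the compare into vmsltu.vv, while
; riscv64 feeds the scalar straight to vmsltu.vx.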
declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsltu_vx_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vmsltu.vv v0, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsltu_vx_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vmsltu.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v11, (a0), zero
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsltu.vv v10, v8, v11, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v11, (a0), zero
+; RV32-NEXT: vmv1r.v v10, v0
+; RV32-NEXT: vmv1r.v v0, v9
+; RV32-NEXT: vmsltu.vv v10, v8, v11, v0.t
+; RV32-NEXT: vmv.v.v v0, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v10, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vmsltu.vx v10, v8, a0, v0.t
+; RV64-NEXT: vmv.v.v v0, v10
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vmsltu.vv v0, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsltu_vx_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vmsltu.vv v0, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsltu_vx_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vmsltu.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsltu.vv v11, v8, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmv1r.v v11, v0
+; RV32-NEXT: vmv1r.v v0, v10
+; RV32-NEXT: vmsltu.vv v11, v8, v12, v0.t
+; RV32-NEXT: vmv1r.v v0, v11
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v11, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vmsltu.vx v11, v8, a0, v0.t
+; RV64-NEXT: vmv1r.v v0, v11
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmsltu.vv v0, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsltu_vx_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmsltu.vv v0, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsltu_vx_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vmsltu.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsltu.vv v13, v8, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vmv1r.v v13, v0
+; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: vmsltu.vv v13, v8, v16, v0.t
+; RV32-NEXT: vmv1r.v v0, v13
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v13, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vmsltu.vx v13, v8, a0, v0.t
+; RV64-NEXT: vmv1r.v v0, v13
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i32 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
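; The immediate-form tests below compare against a constant rather than a
; scalar register, so codegen is identical on both targets and a plain CHECK
; prefix suffices; iXLen appears only as the type of the VL operand.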
-define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 -15,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i8> %1,
i8 -14,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 -13,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i8> %1,
i8 -12,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 -11,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i8> %1,
i8 -10,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 -9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 8 x i8> %1,
i8 -8,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 -7,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 16 x i8> %1,
i8 -6,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmsltu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsltu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 -5,
- i32 %1)
+ iXLen %1)
ret <vscale x 32 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 32 x i8> %1,
i8 -4,
<vscale x 32 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 32 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 -3,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i16> %1,
i16 -2,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 -1,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i16> %1,
i16 0,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 0,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i16> %1,
i16 1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 2,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 8 x i16> %1,
i16 3,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 4,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 16 x i16> %1,
i16 5,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 6,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i32> %1,
i32 7,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 8,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 10,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 4 x i32> %1,
i32 11,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 12,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 8 x i32> %1,
i32 13,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 14,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i64> %1,
i64 15,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 16,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 2 x i64> %1,
i64 -15,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 -14,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 4 x i64> %1,
i64 -13,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8(
- <vscale x 1 x i1>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsne.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- i32 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i8> %3,
- <vscale x 1 x i1> %mask,
- i32 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8(
- <vscale x 2 x i1>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsne.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- i32 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i8> %3,
- <vscale x 2 x i1> %mask,
- i32 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8(
- <vscale x 4 x i1>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsne.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- i32 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i8> %3,
- <vscale x 4 x i1> %mask,
- i32 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i32 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8(
- <vscale x 8 x i1>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsne.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- i32 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i8> %3,
- <vscale x 8 x i1> %mask,
- i32 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsne_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmsne.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i32 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8(
- <vscale x 16 x i1>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsne.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsne.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- i32 %4)
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i8> %3,
- <vscale x 16 x i1> %mask,
- i32 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i32);
-
-define <vscale x 32 x i1> @intrinsic_vmsne_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmsne.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i32 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8(
- <vscale x 32 x i1>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i32);
-
-define <vscale x 32 x i1> @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsne.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsne.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- i32 %4)
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i8> %3,
- <vscale x 32 x i1> %mask,
- i32 %4)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16(
- <vscale x 1 x i1>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsne.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- i32 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i16> %3,
- <vscale x 1 x i1> %mask,
- i32 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16(
- <vscale x 2 x i1>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsne.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- i32 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i16> %3,
- <vscale x 2 x i1> %mask,
- i32 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16(
- <vscale x 4 x i1>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsne.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- i32 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i16> %3,
- <vscale x 4 x i1> %mask,
- i32 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmsne.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i32 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16(
- <vscale x 8 x i1>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsne.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsne.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- i32 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i16> %3,
- <vscale x 8 x i1> %mask,
- i32 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsne_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmsne.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i32 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16(
- <vscale x 16 x i1>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsne.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsne.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- i32 %4)
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i16> %3,
- <vscale x 16 x i1> %mask,
- i32 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32(
- <vscale x 1 x i1>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsne.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- i32 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i32> %3,
- <vscale x 1 x i1> %mask,
- i32 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32(
- <vscale x 2 x i1>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsne.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- i32 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i32> %3,
- <vscale x 2 x i1> %mask,
- i32 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmsne.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32(
- <vscale x 4 x i1>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsne.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsne.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- i32 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i32> %3,
- <vscale x 4 x i1> %mask,
- i32 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmsne.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32(
- <vscale x 8 x i1>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsne.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsne.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- i32 %4)
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i32> %3,
- <vscale x 8 x i1> %mask,
- i32 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(
- <vscale x 1 x i1>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsne.vv v8, v8, v9
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
-; CHECK-NEXT: vmv.v.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- i32 %4)
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i64> %3,
- <vscale x 1 x i1> %mask,
- i32 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmsne.vv v0, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64(
- <vscale x 2 x i1>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsne.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsne.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- i32 %4)
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i64> %3,
- <vscale x 2 x i1> %mask,
- i32 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmsne.vv v0, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64(
- <vscale x 4 x i1>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsne.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsne.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: ret
-entry:
- %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- i32 %4)
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i64> %3,
- <vscale x 4 x i1> %mask,
- i32 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
- <vscale x 1 x i8>,
- i8,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vmsne.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
- <vscale x 1 x i1>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
- <vscale x 2 x i8>,
- i8,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vmsne.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
- <vscale x 2 x i1>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
- <vscale x 4 x i8>,
- i8,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vmsne.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
- <vscale x 4 x i1>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
- <vscale x 8 x i8>,
- i8,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vmsne.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
- <vscale x 8 x i1>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
- <vscale x 16 x i8>,
- i8,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsne_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vmsne.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
- <vscale x 16 x i1>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsne.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
- <vscale x 32 x i8>,
- i8,
- i32);
-
-define <vscale x 32 x i1> @intrinsic_vmsne_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vmsne.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
- <vscale x 32 x i1>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i32);
-
-define <vscale x 32 x i1> @intrinsic_vmsne_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsne.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i32 %4)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
- <vscale x 1 x i16>,
- i16,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vmsne.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
- <vscale x 1 x i1>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
- <vscale x 2 x i16>,
- i16,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vmsne.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
- <vscale x 2 x i1>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
- <vscale x 4 x i16>,
- i16,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vmsne.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
- <vscale x 4 x i1>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
- <vscale x 8 x i16>,
- i16,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vmsne.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
- <vscale x 8 x i1>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsne.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
- <vscale x 16 x i16>,
- i16,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsne_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vmsne.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
- <vscale x 16 x i1>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsne.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
- <vscale x 1 x i32>,
- i32,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vmsne.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
- <vscale x 1 x i1>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
- <vscale x 2 x i32>,
- i32,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vmsne.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
- <vscale x 2 x i1>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
- <vscale x 4 x i32>,
- i32,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vmsne.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
- <vscale x 4 x i1>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsne.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
- <vscale x 8 x i32>,
- i32,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vmsne.vx v0, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
- <vscale x 8 x i1>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsne.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
- <vscale x 1 x i64>,
- i64,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
- <vscale x 1 x i1>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v11, (a0), zero
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsne.vv v10, v8, v11, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
- <vscale x 2 x i64>,
- i64,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vmsne.vv v0, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
- <vscale x 2 x i1>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsne.vv v11, v8, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
- <vscale x 4 x i64>,
- i64,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vmsne.vv v0, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
- <vscale x 4 x i1>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsne.vv v13, v8, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i8> %1,
- i8 9,
- <vscale x 1 x i1> %2,
- i32 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i8> %1,
- i8 9,
- <vscale x 2 x i1> %2,
- i32 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i8> %1,
- i8 9,
- <vscale x 4 x i1> %2,
- i32 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i8> %1,
- i8 9,
- <vscale x 8 x i1> %2,
- i32 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsne_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsne.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i8> %1,
- i8 9,
- <vscale x 16 x i1> %2,
- i32 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsne_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsne_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsne.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i8> %1,
- i8 9,
- <vscale x 32 x i1> %2,
- i32 %3)
-
- ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i16> %1,
- i16 9,
- <vscale x 1 x i1> %2,
- i32 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i16> %1,
- i16 9,
- <vscale x 2 x i1> %2,
- i32 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i16> %1,
- i16 9,
- <vscale x 4 x i1> %2,
- i32 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsne.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i16> %1,
- i16 9,
- <vscale x 8 x i1> %2,
- i32 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsne_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsne.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i16> %1,
- i16 9,
- <vscale x 16 x i1> %2,
- i32 %3)
-
- ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i32> %1,
- i32 9,
- <vscale x 1 x i1> %2,
- i32 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i32> %1,
- i32 9,
- <vscale x 2 x i1> %2,
- i32 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsne.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i32> %1,
- i32 9,
- <vscale x 4 x i1> %2,
- i32 %3)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsne.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i32> %1,
- i32 9,
- <vscale x 8 x i1> %2,
- i32 %3)
-
- ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 9,
- i32 %1)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i64> %1,
- i64 9,
- <vscale x 1 x i1> %2,
- i32 %3)
-
- ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 9,
- i32 %1)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsne.vi v11, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i64> %1,
- i64 9,
- <vscale x 2 x i1> %2,
- i32 %3)
-
- ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 9,
- i32 %1)
-
- ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsne.vi v13, v8, 9, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i64> %1,
- i64 9,
- <vscale x 4 x i1> %2,
- i32 %3)
-
- ret <vscale x 4 x i1> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
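+; Note: iXLen is a placeholder for the target's XLEN integer type; the sed
+; commands in the RUN lines above rewrite it to i32 or i64 so that this one
+; file covers both RV32 and RV64.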
declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8(
<vscale x 1 x i1> %0,
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8(
<vscale x 2 x i1> %0,
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8(
<vscale x 4 x i1> %0,
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8(
<vscale x 8 x i1> %0,
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i64);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsne_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i64);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8(
<vscale x 16 x i1> %0,
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i64);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsne_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsne_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i64);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8(
<vscale x 32 x i1> %0,
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16(
<vscale x 1 x i1> %0,
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16(
<vscale x 2 x i1> %0,
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16(
<vscale x 4 x i1> %0,
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16(
<vscale x 8 x i1> %0,
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i64);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsne_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i64);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16(
<vscale x 16 x i1> %0,
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32(
<vscale x 1 x i1> %0,
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32(
<vscale x 2 x i1> %0,
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32(
<vscale x 4 x i1> %0,
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32(
<vscale x 8 x i1> %0,
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
- i64 %4)
+ iXLen %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
- i64 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsne_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i64);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsne_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsne_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i1> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i64);
+ iXLen);
-define <vscale x 32 x i1> @intrinsic_vmsne_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsne_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 32 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsne_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i1> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i64);
+ iXLen);
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i64);
+ iXLen);
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i64);
+ iXLen);
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i64);
+ iXLen);
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i1> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i64);
+ iXLen);
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vmsne.vx v0, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
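+; Note: an i64 scalar operand does not fit in a single RV32 GPR, so the RV32
+; lowering below spills the a0/a1 pair to the stack, broadcasts it with a
+; zero-stride vlse64.v, and compares with vmsne.vv instead of vmsne.vx.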
+define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsne_vx_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vmsne.vv v0, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsne_vx_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vmsne.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i1> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
-; CHECK-NEXT: vmv.v.v v0, v10
-; CHECK-NEXT: ret
+ iXLen);
+
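+; Note: in the masked form the passthru operand arrives in v0 while the
+; instruction requires the mask in v0, hence the vmv1r.v shuffles around the
+; masked compare.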
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v11, (a0), zero
+; RV32-NEXT: vmv1r.v v10, v0
+; RV32-NEXT: vmv1r.v v0, v9
+; RV32-NEXT: vmsne.vv v10, v8, v11, v0.t
+; RV32-NEXT: vmv.v.v v0, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v10, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vmsne.vx v10, v8, a0, v0.t
+; RV64-NEXT: vmv.v.v v0, v10
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vmsne.vx v0, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsne_vx_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vmsne.vv v0, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsne_vx_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vmsne.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i1> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmsne.vx v11, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmv1r.v v11, v0
+; RV32-NEXT: vmv1r.v v0, v10
+; RV32-NEXT: vmsne.vv v11, v8, v12, v0.t
+; RV32-NEXT: vmv1r.v v0, v11
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v11, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vmsne.vx v11, v8, a0, v0.t
+; RV64-NEXT: vmv1r.v v0, v11
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vmsne.vx v0, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsne_vx_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vmsne.vv v0, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsne_vx_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vmsne.vx v0, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i1> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmsne.vx v13, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vmv1r.v v13, v0
+; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: vmsne.vv v13, v8, v16, v0.t
+; RV32-NEXT: vmv1r.v v0, v13
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vmv1r.v v13, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vmsne.vx v13, v8, a0, v0.t
+; RV64-NEXT: vmv1r.v v0, v13
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i64 %4)
+ iXLen %4)
ret <vscale x 4 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i8> %1,
i8 9,
<vscale x 1 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i8> %1,
i8 9,
<vscale x 2 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i8> %1,
i8 9,
<vscale x 4 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 8 x i8> %1,
i8 9,
<vscale x 8 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsne_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 16 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 16 x i8> %1,
i8 9,
<vscale x 16 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmsne_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsne_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
%a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 32 x i1> %a
}
-define <vscale x 32 x i1> @intrinsic_vmsne_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsne_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 32 x i8> %1,
i8 9,
<vscale x 32 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 32 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i16> %1,
i16 9,
<vscale x 1 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i16> %1,
i16 9,
<vscale x 2 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 4 x i16> %1,
i16 9,
<vscale x 4 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 8 x i16> %1,
i16 9,
<vscale x 8 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsne_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
%a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 16 x i1> %a
}
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 16 x i16> %1,
i16 9,
<vscale x 16 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 16 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i32> %1,
i32 9,
<vscale x 1 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 4 x i32> %1,
i32 9,
<vscale x 4 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
%a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 8 x i32> %1,
i32 9,
<vscale x 8 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 8 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
%a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 1 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
%a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 2 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
%a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 4 x i1> %a
}
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
- i64 %3)
+ iXLen %3)
ret <vscale x 4 x i1> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
-; CHECK-NEXT: vnmsac.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i8> @intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
-; CHECK-NEXT: vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
-; CHECK-NEXT: vnmsac.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i8> @intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
-; CHECK-NEXT: vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
-; CHECK-NEXT: vnmsac.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i8> @intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
-; CHECK-NEXT: vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
-; CHECK-NEXT: vnmsac.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64, i64);
-
-define <vscale x 8 x i8> @intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
-; CHECK-NEXT: vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
-; CHECK-NEXT: vnmsac.vv v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64, i64);
-
-define <vscale x 16 x i8> @intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
-; CHECK-NEXT: vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
-; CHECK-NEXT: vnmsac.vv v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64, i64);
-
-define <vscale x 32 x i8> @intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
-; CHECK-NEXT: vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
-; CHECK-NEXT: vnmsac.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i16> @intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT: vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
-; CHECK-NEXT: vnmsac.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i16> @intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT: vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
-; CHECK-NEXT: vnmsac.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i16> @intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT: vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
-; CHECK-NEXT: vnmsac.vv v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64, i64);
-
-define <vscale x 8 x i16> @intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT: vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
-; CHECK-NEXT: vnmsac.vv v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64, i64);
-
-define <vscale x 16 x i16> @intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT: vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
-; CHECK-NEXT: vnmsac.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i32> @intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT: vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
-; CHECK-NEXT: vnmsac.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i32> @intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT: vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
-; CHECK-NEXT: vnmsac.vv v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i32> @intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT: vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
-; CHECK-NEXT: vnmsac.vv v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64, i64);
-
-define <vscale x 8 x i32> @intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT: vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
-; CHECK-NEXT: vnmsac.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i64> @intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT: vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
-; CHECK-NEXT: vnmsac.vv v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i64> @intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT: vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
-; CHECK-NEXT: vnmsac.vv v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i64> @intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT: vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8(
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i8>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
-; CHECK-NEXT: vnmsac.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 %1,
- <vscale x 1 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8(
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i8> @intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
-; CHECK-NEXT: vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8(
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i8>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
-; CHECK-NEXT: vnmsac.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 %1,
- <vscale x 2 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8(
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i8> @intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
-; CHECK-NEXT: vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8(
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i8>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
-; CHECK-NEXT: vnmsac.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 %1,
- <vscale x 4 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8(
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i8> @intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
-; CHECK-NEXT: vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8(
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i8>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma
-; CHECK-NEXT: vnmsac.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 %1,
- <vscale x 8 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8(
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64, i64);
-
-define <vscale x 8 x i8> @intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
-; CHECK-NEXT: vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8(
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i8>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma
-; CHECK-NEXT: vnmsac.vx v8, a0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 %1,
- <vscale x 16 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8(
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64, i64);
-
-define <vscale x 16 x i8> @intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu
-; CHECK-NEXT: vnmsac.vx v8, a0, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8(
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i8>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma
-; CHECK-NEXT: vnmsac.vx v8, a0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 %1,
- <vscale x 32 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8(
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64, i64);
-
-define <vscale x 32 x i8> @intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu
-; CHECK-NEXT: vnmsac.vx v8, a0, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16(
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i16>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
-; CHECK-NEXT: vnmsac.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 %1,
- <vscale x 1 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16(
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i16> @intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT: vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16(
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i16>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma
-; CHECK-NEXT: vnmsac.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 %1,
- <vscale x 2 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16(
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i16> @intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT: vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16(
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i16>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
-; CHECK-NEXT: vnmsac.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 %1,
- <vscale x 4 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16(
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i16> @intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT: vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16(
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i16>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma
-; CHECK-NEXT: vnmsac.vx v8, a0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 %1,
- <vscale x 8 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16(
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64, i64);
-
-define <vscale x 8 x i16> @intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT: vnmsac.vx v8, a0, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16(
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i16>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma
-; CHECK-NEXT: vnmsac.vx v8, a0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 %1,
- <vscale x 16 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16(
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64, i64);
-
-define <vscale x 16 x i16> @intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
-; CHECK-NEXT: vnmsac.vx v8, a0, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32(
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i32>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
-; CHECK-NEXT: vnmsac.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 %1,
- <vscale x 1 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32(
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i32> @intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT: vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32(
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i32>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
-; CHECK-NEXT: vnmsac.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 %1,
- <vscale x 2 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32(
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i32> @intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT: vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32(
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i32>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
-; CHECK-NEXT: vnmsac.vx v8, a0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 %1,
- <vscale x 4 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32(
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i32> @intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT: vnmsac.vx v8, a0, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32(
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i32>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma
-; CHECK-NEXT: vnmsac.vx v8, a0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 %1,
- <vscale x 8 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32(
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64, i64);
-
-define <vscale x 8 x i32> @intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT: vnmsac.vx v8, a0, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, ma
-; CHECK-NEXT: vnmsac.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 %1,
- <vscale x 1 x i64> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64(
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i64> @intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT: vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64(
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, ma
-; CHECK-NEXT: vnmsac.vx v8, a0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 %1,
- <vscale x 2 x i64> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64(
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i64> @intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT: vnmsac.vx v8, a0, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64(
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, ma
-; CHECK-NEXT: vnmsac.vx v8, a0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 %1,
- <vscale x 4 x i64> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64(
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i64> @intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT: vnmsac.vx v8, a0, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i64> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
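+; The test body below uses the placeholder type iXLen for the VL and policy
+; operands; each RUN line above rewrites it with sed to the target's XLEN
+; integer type (i32 on riscv32, i64 on riscv64) before piping the result to
+; llc, so one test body covers both targets. Assertions common to both
+; targets use the CHECK prefix; the RV32/RV64 prefixes are reserved for
+; output that differs by XLEN.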
declare <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 1 x i8> @intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 2 x i8> @intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 4 x i8> @intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 8 x i8> @intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 16 x i8> @intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 32 x i8> @intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 32 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 1 x i16> @intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 2 x i16> @intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 4 x i16> @intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 8 x i16> @intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 16 x i16> @intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 16 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 1 x i32> @intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 2 x i32> @intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 4 x i32> @intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 8 x i32> @intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 1 x i64> @intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 2 x i64> @intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 4 x i64> @intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i64> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
<vscale x 1 x i8> %0,
i8 %1,
<vscale x 1 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i8> %a
}
i8,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 1 x i8> @intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
i8 %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
<vscale x 2 x i8> %0,
i8 %1,
<vscale x 2 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i8> %a
}
i8,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 2 x i8> @intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
i8 %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
<vscale x 4 x i8> %0,
i8 %1,
<vscale x 4 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i8> %a
}
i8,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 4 x i8> @intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
i8 %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma
<vscale x 8 x i8> %0,
i8 %1,
<vscale x 8 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i8> %a
}
i8,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 8 x i8> @intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
i8 %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma
<vscale x 16 x i8> %0,
i8 %1,
<vscale x 16 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 16 x i8> %a
}
i8,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 16 x i8> @intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu
i8 %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma
<vscale x 32 x i8> %0,
i8 %1,
<vscale x 32 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 32 x i8> %a
}
i8,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 32 x i8> @intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu
i8 %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 32 x i8> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
<vscale x 1 x i16> %0,
i16 %1,
<vscale x 1 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i16> %a
}
i16,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 1 x i16> @intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
i16 %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma
<vscale x 2 x i16> %0,
i16 %1,
<vscale x 2 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i16> %a
}
i16,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 2 x i16> @intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
i16 %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
<vscale x 4 x i16> %0,
i16 %1,
<vscale x 4 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i16> %a
}
i16,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 4 x i16> @intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
i16 %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma
<vscale x 8 x i16> %0,
i16 %1,
<vscale x 8 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i16> %a
}
i16,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 8 x i16> @intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
i16 %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma
<vscale x 16 x i16> %0,
i16 %1,
<vscale x 16 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 16 x i16> %a
}
i16,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 16 x i16> @intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
i16 %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 16 x i16> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
<vscale x 1 x i32> %0,
i32 %1,
<vscale x 1 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i32> %a
}
i32,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 1 x i32> @intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
i32 %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
<vscale x 2 x i32> %0,
i32 %1,
<vscale x 2 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i32> %a
}
i32,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 2 x i32> @intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
i32 %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
<vscale x 4 x i32> %0,
i32 %1,
<vscale x 4 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i32> %a
}
i32,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 4 x i32> @intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
i32 %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma
<vscale x 8 x i32> %0,
i32 %1,
<vscale x 8 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i32> %a
}
i32,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 8 x i32> @intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
i32 %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i32> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, ma
-; CHECK-NEXT: vnmsac.vv v8, v10, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma
+; RV32-NEXT: vnmsac.vv v8, v10, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT: vnmsac.vx v8, a0, v9
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i64> %a
}
i64,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32, i32);
-
-define <vscale x 1 x i64> @intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu
-; CHECK-NEXT: vnmsac.vv v8, v10, v9, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen, iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu
+; RV32-NEXT: vnmsac.vv v8, v10, v9, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; RV64-NEXT: vnmsac.vx v8, a0, v9, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i64>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, ma
-; CHECK-NEXT: vnmsac.vv v8, v12, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, ma
+; RV32-NEXT: vnmsac.vv v8, v12, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT: vnmsac.vx v8, a0, v10
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i64> %a
}
i64,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32, i32);
-
-define <vscale x 2 x i64> @intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu
-; CHECK-NEXT: vnmsac.vv v8, v12, v10, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen, iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, mu
+; RV32-NEXT: vnmsac.vv v8, v12, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu
+; RV64-NEXT: vnmsac.vx v8, a0, v10, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i64>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, ma
-; CHECK-NEXT: vnmsac.vv v8, v16, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, ma
+; RV32-NEXT: vnmsac.vv v8, v16, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma
+; RV64-NEXT: vnmsac.vx v8, a0, v12
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i64> %a
}
i64,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32, i32);
-
-define <vscale x 4 x i64> @intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu
-; CHECK-NEXT: vnmsac.vv v8, v16, v12, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen, iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu
+; RV32-NEXT: vnmsac.vv v8, v16, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu
+; RV64-NEXT: vnmsac.vx v8, a0, v12, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i64> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
-; CHECK-NEXT: vnmsub.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i8> @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
-; CHECK-NEXT: vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
-; CHECK-NEXT: vnmsub.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i8> @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
-; CHECK-NEXT: vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
-; CHECK-NEXT: vnmsub.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i8> @intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
-; CHECK-NEXT: vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
-; CHECK-NEXT: vnmsub.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64, i64);
-
-define <vscale x 8 x i8> @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
-; CHECK-NEXT: vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
-; CHECK-NEXT: vnmsub.vv v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64, i64);
-
-define <vscale x 16 x i8> @intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
-; CHECK-NEXT: vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
-; CHECK-NEXT: vnmsub.vv v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64, i64);
-
-define <vscale x 32 x i8> @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
-; CHECK-NEXT: vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
-; CHECK-NEXT: vnmsub.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i16> @intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT: vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
-; CHECK-NEXT: vnmsub.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i16> @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT: vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
-; CHECK-NEXT: vnmsub.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i16> @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT: vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
-; CHECK-NEXT: vnmsub.vv v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64, i64);
-
-define <vscale x 8 x i16> @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT: vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
-; CHECK-NEXT: vnmsub.vv v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64, i64);
-
-define <vscale x 16 x i16> @intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT: vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
-; CHECK-NEXT: vnmsub.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i32> @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT: vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
-; CHECK-NEXT: vnmsub.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i32> @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT: vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
-; CHECK-NEXT: vnmsub.vv v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i32> @intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT: vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
-; CHECK-NEXT: vnmsub.vv v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64, i64);
-
-define <vscale x 8 x i32> @intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT: vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
-; CHECK-NEXT: vnmsub.vv v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i64> @intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT: vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
-; CHECK-NEXT: vnmsub.vv v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i64> @intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT: vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
-; CHECK-NEXT: vnmsub.vv v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i64> @intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT: vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8(
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i8>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
-; CHECK-NEXT: vnmsub.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 %1,
- <vscale x 1 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8(
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i8> @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
-; CHECK-NEXT: vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- i8 %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8(
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i8>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
-; CHECK-NEXT: vnmsub.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 %1,
- <vscale x 2 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8(
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i8> @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
-; CHECK-NEXT: vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- i8 %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8(
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i8>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
-; CHECK-NEXT: vnmsub.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 %1,
- <vscale x 4 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8(
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i8> @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
-; CHECK-NEXT: vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- i8 %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8(
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i8>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma
-; CHECK-NEXT: vnmsub.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 %1,
- <vscale x 8 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8(
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64, i64);
-
-define <vscale x 8 x i8> @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
-; CHECK-NEXT: vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- i8 %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8(
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i8>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma
-; CHECK-NEXT: vnmsub.vx v8, a0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 %1,
- <vscale x 16 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8(
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64, i64);
-
-define <vscale x 16 x i8> @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu
-; CHECK-NEXT: vnmsub.vx v8, a0, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- i8 %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8(
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i8>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma
-; CHECK-NEXT: vnmsub.vx v8, a0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 %1,
- <vscale x 32 x i8> %2,
- i64 %3, i64 0)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8(
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64, i64);
-
-define <vscale x 32 x i8> @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu
-; CHECK-NEXT: vnmsub.vx v8, a0, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- i8 %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16(
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i16>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
-; CHECK-NEXT: vnmsub.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 %1,
- <vscale x 1 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16(
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i16> @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT: vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- i16 %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16(
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i16>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma
-; CHECK-NEXT: vnmsub.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 %1,
- <vscale x 2 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16(
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i16> @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT: vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- i16 %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16(
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i16>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
-; CHECK-NEXT: vnmsub.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 %1,
- <vscale x 4 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16(
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i16> @intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT: vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- i16 %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16(
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i16>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma
-; CHECK-NEXT: vnmsub.vx v8, a0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 %1,
- <vscale x 8 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16(
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64, i64);
-
-define <vscale x 8 x i16> @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT: vnmsub.vx v8, a0, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- i16 %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16(
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i16>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma
-; CHECK-NEXT: vnmsub.vx v8, a0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 %1,
- <vscale x 16 x i16> %2,
- i64 %3, i64 0)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16(
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64, i64);
-
-define <vscale x 16 x i16> @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
-; CHECK-NEXT: vnmsub.vx v8, a0, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- i16 %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32(
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i32>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
-; CHECK-NEXT: vnmsub.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 %1,
- <vscale x 1 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32(
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i32> @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT: vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- i32 %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32(
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i32>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
-; CHECK-NEXT: vnmsub.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 %1,
- <vscale x 2 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32(
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i32> @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT: vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- i32 %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32(
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i32>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
-; CHECK-NEXT: vnmsub.vx v8, a0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 %1,
- <vscale x 4 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32(
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i32> @intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT: vnmsub.vx v8, a0, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- i32 %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32(
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i32>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma
-; CHECK-NEXT: vnmsub.vx v8, a0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 %1,
- <vscale x 8 x i32> %2,
- i64 %3, i64 0)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32(
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64, i64);
-
-define <vscale x 8 x i32> @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT: vnmsub.vx v8, a0, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- i32 %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, ma
-; CHECK-NEXT: vnmsub.vx v8, a0, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 %1,
- <vscale x 1 x i64> %2,
- i64 %3, i64 0)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64(
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64, i64);
-
-define <vscale x 1 x i64> @intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT: vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- i64 %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64(
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, ma
-; CHECK-NEXT: vnmsub.vx v8, a0, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 %1,
- <vscale x 2 x i64> %2,
- i64 %3, i64 0)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64(
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64, i64);
-
-define <vscale x 2 x i64> @intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT: vnmsub.vx v8, a0, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- i64 %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64(
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, ma
-; CHECK-NEXT: vnmsub.vx v8, a0, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 %1,
- <vscale x 4 x i64> %2,
- i64 %3, i64 0)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64(
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64, i64);
-
-define <vscale x 4 x i64> @intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT: vnmsub.vx v8, a0, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- i64 %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 4 x i64> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
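+; iXLen is a stand-in for the target's XLEN integer type: the RUN lines
+; above rewrite it to i32 (RV32) or i64 (RV64) with sed before invoking llc,
+; so a single test body covers both targets. vnmsub computes
+; vd = -(vd * vs1) + vs2, overwriting the multiplicand, and the trailing
+; iXLen 0 argument is the policy operand (0 = tail undisturbed, matching the
+; "tu" in the generated vsetvli).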
declare <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 1 x i8> @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 2 x i8> @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 4 x i8> @intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 8 x i8> @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 16 x i8> @intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 32 x i8> @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 32 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 1 x i16> @intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 2 x i16> @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 4 x i16> @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 8 x i16> @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 16 x i16> @intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 16 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 1 x i32> @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 2 x i32> @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 4 x i32> @intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 8 x i32> @intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 1 x i64> @intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 2 x i64> @intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 4 x i64> @intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i64> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
<vscale x 1 x i8> %0,
i8 %1,
<vscale x 1 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i8> %a
}
i8,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 1 x i8> @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
i8 %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
<vscale x 2 x i8> %0,
i8 %1,
<vscale x 2 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i8> %a
}
i8,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 2 x i8> @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
i8 %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
<vscale x 4 x i8> %0,
i8 %1,
<vscale x 4 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i8> %a
}
i8,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 4 x i8> @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
i8 %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma
<vscale x 8 x i8> %0,
i8 %1,
<vscale x 8 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i8> %a
}
i8,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 8 x i8> @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
i8 %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma
<vscale x 16 x i8> %0,
i8 %1,
<vscale x 16 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 16 x i8> %a
}
i8,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 16 x i8> @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu
i8 %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i8>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma
<vscale x 32 x i8> %0,
i8 %1,
<vscale x 32 x i8> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 32 x i8> %a
}
i8,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 32 x i8> @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu
i8 %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 32 x i8> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
<vscale x 1 x i16> %0,
i16 %1,
<vscale x 1 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i16> %a
}
i16,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 1 x i16> @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
i16 %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma
<vscale x 2 x i16> %0,
i16 %1,
<vscale x 2 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i16> %a
}
i16,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 2 x i16> @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
i16 %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
<vscale x 4 x i16> %0,
i16 %1,
<vscale x 4 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i16> %a
}
i16,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 4 x i16> @intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
i16 %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma
<vscale x 8 x i16> %0,
i16 %1,
<vscale x 8 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i16> %a
}
i16,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 8 x i16> @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
i16 %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i16>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma
<vscale x 16 x i16> %0,
i16 %1,
<vscale x 16 x i16> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 16 x i16> %a
}
i16,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 16 x i16> @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
i16 %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 16 x i16> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
<vscale x 1 x i32> %0,
i32 %1,
<vscale x 1 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i32> %a
}
i32,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 1 x i32> @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
i32 %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
<vscale x 2 x i32> %0,
i32 %1,
<vscale x 2 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i32> %a
}
i32,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 2 x i32> @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
i32 %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
<vscale x 4 x i32> %0,
i32 %1,
<vscale x 4 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i32> %a
}
i32,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 4 x i32> @intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
i32 %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i32>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma
<vscale x 8 x i32> %0,
i32 %1,
<vscale x 8 x i32> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i32> %a
}
i32,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32, i32);
+ iXLen, iXLen);
-define <vscale x 8 x i32> @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
i32 %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 8 x i32> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, ma
-; CHECK-NEXT: vnmsub.vv v8, v10, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
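+; On RV32 the 64-bit scalar operand cannot be passed in a single GPR, so it
+; is expected to be spilled to the stack as two 32-bit halves, splatted with
+; a zero-strided vlse64.v, and consumed via the .vv form; RV64 can use
+; vnmsub.vx directly.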
+define <vscale x 1 x i64> @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma
+; RV32-NEXT: vnmsub.vv v8, v10, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT: vnmsub.vx v8, a0, v9
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i64> %a
}
i64,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32, i32);
-
-define <vscale x 1 x i64> @intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu
-; CHECK-NEXT: vnmsub.vv v8, v10, v9, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen, iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu
+; RV32-NEXT: vnmsub.vv v8, v10, v9, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; RV64-NEXT: vnmsub.vx v8, a0, v9, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64(
 <vscale x 2 x i64>,
i64,
<vscale x 2 x i64>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, ma
-; CHECK-NEXT: vnmsub.vv v8, v12, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, ma
+; RV32-NEXT: vnmsub.vv v8, v12, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT: vnmsub.vx v8, a0, v10
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64(
 <vscale x 2 x i64>,
 i64,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32, i32);
-
-define <vscale x 2 x i64> @intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu
-; CHECK-NEXT: vnmsub.vv v8, v12, v10, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen, iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, mu
+; RV32-NEXT: vnmsub.vv v8, v12, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu
+; RV64-NEXT: vnmsub.vx v8, a0, v10, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64(
 <vscale x 4 x i64>,
i64,
<vscale x 4 x i64>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, ma
-; CHECK-NEXT: vnmsub.vv v8, v16, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, ma
+; RV32-NEXT: vnmsub.vv v8, v16, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma
+; RV64-NEXT: vnmsub.vx v8, a0, v12
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
- i32 %3, i32 0)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64(
 <vscale x 4 x i64>,
 i64,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32, i32);
-
-define <vscale x 4 x i64> @intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu
-; CHECK-NEXT: vnmsub.vv v8, v16, v12, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen, iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu
+; RV32-NEXT: vnmsub.vv v8, v16, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu
+; RV64-NEXT: vnmsub.vx v8, a0, v12, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 0)
+ iXLen %4, iXLen 0)
ret <vscale x 4 x i64> %a
}
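
Note: the vnmsub hunks above follow one mechanical pattern. Every trailing i32 VL and policy operand (and the matching i32 parameters in the declares) becomes the iXLen placeholder, and the e64 tests gain split RV32/RV64 check prefixes because RV32 has no 64-bit GPR: it must spill the i64 scalar to the stack and splat it with a strided load (vlse64.v) before a plain vnmsub.vv, while RV64 issues a single vnmsub.vx. A test file parameterized this way is conventionally driven by sed-substituted RUN lines; the lines below are a sketch of that usual convention under assumed flags, not lines taken from this patch:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

sed rewrites iXLen to the target's native XLEN integer type before llc parses the input, so the placeholder itself never reaches the IR parser.
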
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vor.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i32 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vor.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i32 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vor.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i32 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vor.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i32 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vor.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i32 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vor.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i32 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vor.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i32 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i32,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8r.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vor.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i32 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vor.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i32 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vor.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i32 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vor.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i32 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vor.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i32 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vor.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i32 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vor.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vor.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vor.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vor.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vor.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vor.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i32 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vor.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i32 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vor.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i32 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vor.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i32 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vor_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vor_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vor_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vor_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vor_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vor.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vor_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vor.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vor_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- <vscale x 64 x i1>,
- i32,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vor.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i8 %2,
- <vscale x 64 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vor_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vor_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vor_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vor_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vor.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vor_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vor.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vor_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vor.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i16 %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vor_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vor_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vor_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vor.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vor_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vor.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vor_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vor.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vor_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vor.vv v8, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vor_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vor.vv v8, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vor_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vor.vv v8, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vor_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vor.vv v8, v8, v16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i8 9,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vor_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i8 9,
- <vscale x 2 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vor_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i8 9,
- <vscale x 4 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vor_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i8 9,
- <vscale x 8 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vor_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i8 9,
- <vscale x 16 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vor_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i8 9,
- <vscale x 32 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vor_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT: vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i8 9,
- <vscale x 64 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vor_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i16 9,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vor_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i16 9,
- <vscale x 2 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vor_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i16 9,
- <vscale x 4 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vor_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i16 9,
- <vscale x 8 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vor_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i16 9,
- <vscale x 16 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vor_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT: vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i16 9,
- <vscale x 32 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vor_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 9,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vor_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 9,
- <vscale x 2 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vor_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 9,
- <vscale x 4 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vor_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 9,
- <vscale x 8 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vor_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT: vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 9,
- <vscale x 16 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vor_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 9,
- i32 %1)
-
- ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 9,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vor_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 9,
- i32 %1)
-
- ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 9,
- <vscale x 2 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vor_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 9,
- i32 %1)
-
- ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 9,
- <vscale x 4 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 9,
- i32 %1)
-
- ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 9,
- <vscale x 8 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 8 x i64> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
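+; iXLen below is a placeholder for the target's native integer width: the sed
+; invocations above rewrite it to i32 for the riscv32 run and to i64 for the
+; riscv64 run, so a single test body exercises both targets.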
declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i64);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i64);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i64);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i64);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i64);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i64);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
- i64);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
<vscale x 64 x i8> %1,
<vscale x 64 x i8> %2,
<vscale x 64 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i64);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i64);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i64);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i64);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i64);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
- i64);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re16.v v24, (a0)
<vscale x 32 x i16> %1,
<vscale x 32 x i16> %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i64);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i64);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i64);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i64);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
- i64);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re32.v v24, (a0)
<vscale x 16 x i32> %1,
<vscale x 16 x i32> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
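; NOTE: In the fully-masked vv case at LMUL=8 (nxv16i32 above, and nxv8i64
; further down) three m8 vector arguments plus the mask exhaust the v8-v23
; argument registers, so the third operand is passed indirectly; that is why
; these checks open with a whole-register vl8re32.v/vl8re64.v load from (a0)
; instead of a vsetvli, with the VL then taken from a1.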
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i64);
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i64);
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i64);
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
- i64);
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vor_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vor_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vor_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vor_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vor_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vor_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vor_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vor_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vor_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vor_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vor_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vor_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vor_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vor_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
i8,
<vscale x 64 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
<vscale x 64 x i8> %1,
i8 %2,
<vscale x 64 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vor_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vor_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vor_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vor_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vor_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vor_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vor_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vor_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vor_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vor_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vor_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vor_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
i16,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
<vscale x 32 x i16> %1,
i16 %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vor_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vor_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vor_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vor_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vor_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vor_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vor_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vor_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vor_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vor_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
i32,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
<vscale x 16 x i32> %1,
i32 %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vor_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vor.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vor_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vor_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vor.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vor_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vor.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
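; NOTE: From here the .vx forms take a 64-bit scalar. On RV32 that operand
; arrives split across a0/a1, so the regenerated assertions gain separate
; RV32/RV64 bodies: RV32 spills the pair to the stack and splats it with a
; zero-stride vlse64.v before falling back to vor.vv, while RV64 keeps the
; single vor.vx. The same RV32/RV64 split repeats for the m2/m4/m8 variants
; below.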
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vor.vv v8, v9, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vor.vx v8, v9, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vor_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vor.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vor_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vor_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vor.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vor_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vor.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vor.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vor.vv v8, v10, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vor.vx v8, v10, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vor_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vor.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vor_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vor_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vor.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vor_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vor.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vor.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vor.vv v8, v12, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vor.vx v8, v12, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vor_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vor.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vor_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vor_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vor.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vor_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vor.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vor.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vlse64.v v24, (a0), zero
+; RV32-NEXT: vor.vv v8, v16, v24, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vor.vx v8, v16, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
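; NOTE: The .vi forms below fold the constant 9 into the instruction's 5-bit
; immediate field, so no scalar register is involved and a single shared CHECK
; prefix still covers both targets.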
-define <vscale x 1 x i8> @intrinsic_vor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 1 x i8> %a
}
-define <vscale x 1 x i8> @intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
i8 9,
<vscale x 1 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 1 x i8> %a
}
-define <vscale x 2 x i8> @intrinsic_vor_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vor_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 2 x i8> %a
}
-define <vscale x 2 x i8> @intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
i8 9,
<vscale x 2 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 2 x i8> %a
}
-define <vscale x 4 x i8> @intrinsic_vor_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_vor_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 4 x i8> %a
}
-define <vscale x 4 x i8> @intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
i8 9,
<vscale x 4 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 4 x i8> %a
}
-define <vscale x 8 x i8> @intrinsic_vor_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_vor_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 8 x i8> %a
}
-define <vscale x 8 x i8> @intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
<vscale x 8 x i8> %1,
i8 9,
<vscale x 8 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 8 x i8> %a
}
-define <vscale x 16 x i8> @intrinsic_vor_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_vor_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 16 x i8> %a
}
-define <vscale x 16 x i8> @intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
<vscale x 16 x i8> %1,
i8 9,
<vscale x 16 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 16 x i8> %a
}
-define <vscale x 32 x i8> @intrinsic_vor_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_vor_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 32 x i8> %a
}
-define <vscale x 32 x i8> @intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
<vscale x 32 x i8> %1,
i8 9,
<vscale x 32 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 32 x i8> %a
}
-define <vscale x 64 x i8> @intrinsic_vor_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
+define <vscale x 64 x i8> @intrinsic_vor_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 64 x i8> %a
}
-define <vscale x 64 x i8> @intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
<vscale x 64 x i8> %1,
i8 9,
<vscale x 64 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 64 x i8> %a
}
-define <vscale x 1 x i16> @intrinsic_vor_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vor_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 1 x i16> %a
}
-define <vscale x 1 x i16> @intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
i16 9,
<vscale x 1 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 1 x i16> %a
}
-define <vscale x 2 x i16> @intrinsic_vor_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vor_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 2 x i16> %a
}
-define <vscale x 2 x i16> @intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
i16 9,
<vscale x 2 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 2 x i16> %a
}
-define <vscale x 4 x i16> @intrinsic_vor_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vor_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 4 x i16> %a
}
-define <vscale x 4 x i16> @intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
<vscale x 4 x i16> %1,
i16 9,
<vscale x 4 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 4 x i16> %a
}
-define <vscale x 8 x i16> @intrinsic_vor_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vor_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 8 x i16> %a
}
-define <vscale x 8 x i16> @intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
<vscale x 8 x i16> %1,
i16 9,
<vscale x 8 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 8 x i16> %a
}
-define <vscale x 16 x i16> @intrinsic_vor_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vor_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 16 x i16> %a
}
-define <vscale x 16 x i16> @intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
<vscale x 16 x i16> %1,
i16 9,
<vscale x 16 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 16 x i16> %a
}
-define <vscale x 32 x i16> @intrinsic_vor_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
+define <vscale x 32 x i16> @intrinsic_vor_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 32 x i16> %a
}
-define <vscale x 32 x i16> @intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
<vscale x 32 x i16> %1,
i16 9,
<vscale x 32 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 32 x i16> %a
}
-define <vscale x 1 x i32> @intrinsic_vor_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vor_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 1 x i32> %a
}
-define <vscale x 1 x i32> @intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
i32 9,
<vscale x 1 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 1 x i32> %a
}
-define <vscale x 2 x i32> @intrinsic_vor_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vor_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 2 x i32> %a
}
-define <vscale x 2 x i32> @intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 2 x i32> %a
}
-define <vscale x 4 x i32> @intrinsic_vor_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vor_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 4 x i32> %a
}
-define <vscale x 4 x i32> @intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
<vscale x 4 x i32> %1,
i32 9,
<vscale x 4 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 4 x i32> %a
}
-define <vscale x 8 x i32> @intrinsic_vor_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vor_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 8 x i32> %a
}
-define <vscale x 8 x i32> @intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
<vscale x 8 x i32> %1,
i32 9,
<vscale x 8 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 8 x i32> %a
}
-define <vscale x 16 x i32> @intrinsic_vor_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vor_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 16 x i32> %a
}
-define <vscale x 16 x i32> @intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
<vscale x 16 x i32> %1,
i32 9,
<vscale x 16 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 16 x i32> %a
}
-define <vscale x 1 x i64> @intrinsic_vor_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vor_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 1 x i64> %a
}
-define <vscale x 1 x i64> @intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 1 x i64> %a
}
-define <vscale x 2 x i64> @intrinsic_vor_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vor_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 2 x i64> %a
}
-define <vscale x 2 x i64> @intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 2 x i64> %a
}
-define <vscale x 4 x i64> @intrinsic_vor_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vor_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 4 x i64> %a
}
-define <vscale x 4 x i64> @intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 4 x i64> %a
}
-define <vscale x 8 x i64> @intrinsic_vor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 8 x i64> %a
}
-define <vscale x 8 x i64> @intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
<vscale x 8 x i64> %1,
i64 9,
<vscale x 8 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 8 x i64> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vrem.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i32 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vrem.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i32 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vrem.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i32 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vrem.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i32 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vrem.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i32 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vrem.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i32 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vrem.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i32 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i32,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8r.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vrem.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i32 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vrem.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i32 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vrem.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i32 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vrem.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i32 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vrem.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i32 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vrem.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i32 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vrem.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vrem.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vrem.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vrem.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vrem.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vrem.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i32 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vrem.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i32 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vrem.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i32 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vrem.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i32 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vrem_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vrem.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vrem_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vrem.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vrem_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vrem.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vrem_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vrem.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vrem_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vrem.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vrem.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vrem_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vrem.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vrem.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vrem_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vrem.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- <vscale x 64 x i1>,
- i32,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vrem.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i8 %2,
- <vscale x 64 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vrem_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vrem.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vrem_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vrem.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vrem_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vrem.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vrem_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vrem.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vrem.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vrem_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vrem.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vrem.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vrem_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vrem.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vrem.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i16 %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vrem_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vrem.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vrem_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vrem.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vrem_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vrem.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vrem.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vrem_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vrem.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vrem.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vrem_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vrem.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vrem.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vrem_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vrem.vv v8, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vrem_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vrem.vv v8, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vrem_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vrem.vv v8, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vrem_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vrem.vv v8, v8, v16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i64> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
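+; A brief note on the scheme above (an explanatory comment, not autogenerated):
+; the iXLen placeholder lets a single source file serve both targets. The sed
+; invocations rewrite iXLen to i32 for riscv32 and to i64 for riscv64 before
+; llc runs, so the VL/policy operands take the target's native XLEN type. The
+; RV32/RV64 check prefixes exist alongside the common CHECK prefix, presumably
+; for cases where codegen diverges between the two targets (e.g. i64 scalar
+; operands, which RV32 must splat through the stack); the checks that follow
+; here are shared by both.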
declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i64);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i64);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i64);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i64);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i64);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i64);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
- i64);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
<vscale x 64 x i8> %1,
<vscale x 64 x i8> %2,
<vscale x 64 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i64);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i64);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i64);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i64);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i64);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
- i64);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re16.v v24, (a0)
<vscale x 32 x i16> %1,
<vscale x 32 x i16> %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i64);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i64);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i64);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i64);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
- i64);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re32.v v24, (a0)
<vscale x 16 x i32> %1,
<vscale x 16 x i32> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i64);
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i64);
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i64);
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
- i64);
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vrem_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vrem_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vrem_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vrem_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vrem_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vrem_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vrem_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vrem_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vrem_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vrem_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vrem_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vrem_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
- i64);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vrem_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vrem_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
i8,
<vscale x 64 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
<vscale x 64 x i8> %1,
i8 %2,
<vscale x 64 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vrem_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vrem_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vrem_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vrem_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vrem_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vrem_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vrem_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vrem_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vrem_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vrem_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
- i64);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vrem_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vrem_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
i16,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
<vscale x 32 x i16> %1,
i16 %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vrem_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vrem_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vrem_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vrem_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vrem_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vrem_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vrem_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vrem_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
- i64);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vrem_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vrem_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
i32,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
<vscale x 16 x i32> %1,
i32 %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vrem_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vrem.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
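+; With SEW=64, the scalar operand no longer fits in a single RV32 GPR, so the
+; checks for the e64 cases below are split by XLEN: RV32 receives the scalar
+; in the a0/a1 register pair, spills it to the stack, and splats it with a
+; zero-stride vlse64.v before using the .vv form, while RV64 can use the
+; single-register .vx form directly.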
+define <vscale x 1 x i64> @intrinsic_vrem_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vrem_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vrem.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vrem_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vrem.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64(
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vrem.vv v8, v9, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vrem.vx v8, v9, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vrem_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vrem.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vrem_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vrem_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vrem.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vrem_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vrem.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64(
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vrem.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vrem.vv v8, v10, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vrem.vx v8, v10, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vrem_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vrem.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vrem_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vrem_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vrem.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vrem_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vrem.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64(
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vrem.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vrem.vv v8, v12, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vrem.vx v8, v12, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vrem_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vrem.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vrem_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vrem_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vrem.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vrem_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vrem.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64(
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vrem.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vlse64.v v24, (a0), zero
+; RV32-NEXT: vrem.vv v8, v16, v24, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vrem.vx v8, v16, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vremu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vremu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vremu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vremu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vremu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vremu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vremu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i64 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i64,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8r.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vremu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vremu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vremu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vremu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vremu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vremu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vremu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vremu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vremu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vremu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vremu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vremu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vremu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vremu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vremu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vremu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vremu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vremu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vremu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vremu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vremu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vremu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vremu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vremu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vremu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vremu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vremu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vremu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vremu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vremu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vremu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- <vscale x 64 x i1>,
- i64,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vremu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i8 %2,
- <vscale x 64 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vremu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vremu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vremu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vremu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vremu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vremu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vremu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vremu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vremu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vremu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vremu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vremu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vremu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vremu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vremu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i16 %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vremu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vremu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vremu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vremu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vremu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vremu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vremu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vremu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vremu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vremu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vremu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vremu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vremu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vremu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vremu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vremu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vremu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vremu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vremu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vremu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vremu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vremu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vremu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vremu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
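; Descriptive note (not in the autogenerated file): the RUN lines above use
; sed to instantiate this one test for both targets, rewriting every iXLen
; placeholder below to i32 for the riscv32 run and to i64 for the riscv64
; run; assertions common to both targets use the shared CHECK prefix, while
; target-specific ones would use RV32 or RV64.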
declare <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i32);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i32);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i32);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i32);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i32);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i32);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
- i32);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
<vscale x 64 x i8> %1,
<vscale x 64 x i8> %2,
<vscale x 64 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i32);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i32);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i32);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i32);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i32);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
- i32);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re16.v v24, (a0)
<vscale x 32 x i16> %1,
<vscale x 32 x i16> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i32);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i32);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i32);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i32);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
- i32);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re32.v v24, (a0)
<vscale x 16 x i32> %1,
<vscale x 16 x i32> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i32);
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i32);
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i32);
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
- i32);
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vremu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vremu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vremu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vremu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vremu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vremu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vremu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vremu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vremu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vremu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vremu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vremu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vremu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vremu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
i8,
<vscale x 64 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
<vscale x 64 x i8> %1,
i8 %2,
<vscale x 64 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vremu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vremu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vremu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vremu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vremu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vremu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vremu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vremu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vremu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vremu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vremu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vremu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
i16,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
<vscale x 32 x i16> %1,
i16 %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vremu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vremu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vremu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vremu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vremu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vremu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vremu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vremu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vremu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vremu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
i32,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
<vscale x 16 x i32> %1,
i32 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vremu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vremu.vv v8, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vremu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vremu_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vremu.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vremu_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vremu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64(
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
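+; For the i64 scalar tests the two targets diverge, so separate RV32/RV64
+; check blocks are needed: RV32 receives the 64-bit scalar split across
+; a0/a1, spills it to a stack slot, and splats it with a zero-stride vlse64,
+; turning the operation into vremu.vv, whereas RV64 holds the scalar in a
+; single GPR and can use vremu.vx directly.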
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vremu.vv v8, v9, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vremu.vx v8, v9, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vremu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vremu.vv v8, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vremu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vremu_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vremu.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vremu_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vremu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64(
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vremu.vv v8, v10, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vremu.vx v8, v10, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vremu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vremu.vv v8, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vremu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vremu_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vremu.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vremu_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vremu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64(
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vremu.vv v8, v12, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vremu.vx v8, v12, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vremu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vremu.vv v8, v8, v16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vremu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vremu_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vremu.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vremu_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vremu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64(
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vlse64.v v24, (a0), zero
+; RV32-NEXT: vremu.vv v8, v16, v24, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vremu.vx v8, v16, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i1> %2,
- i64 %3)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i1> %2,
- i64 %3)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i1> %2,
- i64 %3)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 %1,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 %1,
- <vscale x 32 x i1> %2,
- i64 %3)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- <vscale x 64 x i1>,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 %1,
- <vscale x 64 x i1> %2,
- i64 %3)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 %1,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 %1,
- <vscale x 32 x i1> %2,
- i64 %3)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 %1,
- <vscale x 16 x i1> %2,
- i64 %3)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 %1,
- <vscale x 1 x i1> %2,
- i64 %3)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 %1,
- <vscale x 2 x i1> %2,
- i64 %3)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 %1,
- <vscale x 4 x i1> %2,
- i64 %3)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 %1,
- <vscale x 8 x i1> %2,
- i64 %3)
-
- ret <vscale x 8 x i64> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
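+; iXLen below is a stand-in for the target's native XLEN integer type: the sed
+; invocations above rewrite it to i32 for riscv32 and i64 for riscv64, so a
+; single source file drives both targets. Assertions common to both outputs
+; use the CHECK prefix; target-specific codegen is checked under RV32 or RV64.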
declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i1>,
- i32);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
<vscale x 64 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
<vscale x 32 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i64> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
<vscale x 1 x i8> %0,
i8 %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
<vscale x 2 x i8> %0,
i8 %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
<vscale x 4 x i8> %0,
i8 %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
<vscale x 8 x i8> %0,
i8 %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
<vscale x 16 x i8> %0,
i8 %1,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
<vscale x 32 x i8> %0,
i8 %1,
<vscale x 32 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
i8,
<vscale x 64 x i1>,
- i32);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
<vscale x 64 x i8> %0,
i8 %1,
<vscale x 64 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
<vscale x 1 x i16> %0,
i16 %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
<vscale x 2 x i16> %0,
i16 %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
<vscale x 4 x i16> %0,
i16 %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
<vscale x 8 x i16> %0,
i16 %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
<vscale x 16 x i16> %0,
i16 %1,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
i16,
<vscale x 32 x i1>,
- i32);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
<vscale x 32 x i16> %0,
i16 %1,
<vscale x 32 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i32);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
<vscale x 1 x i32> %0,
i32 %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i32);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
<vscale x 2 x i32> %0,
i32 %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i32);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
<vscale x 4 x i32> %0,
i32 %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i32);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
<vscale x 8 x i32> %0,
i32 %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
i32,
<vscale x 16 x i1>,
- i32);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
<vscale x 16 x i32> %0,
i32 %1,
<vscale x 16 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
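+; With a 64-bit scalar operand the two targets diverge: on riscv32 the i64
+; value arrives in the a0/a1 register pair, is spilled to the stack, and is
+; splatted with a stride-zero vlse64.v so the masked subtract can use
+; vsbc.vvm, while riscv64 keeps the scalar in a0 and emits vsbc.vxm directly.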
+define <vscale x 1 x i64> @intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vsbc.vvm v8, v8, v9, v0
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vsbc.vxm v8, v8, a0, v0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64(
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsbc.vvm v8, v8, v10, v0
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vsbc.vxm v8, v8, a0, v0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64(
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsbc.vvm v8, v8, v12, v0
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vsbc.vxm v8, v8, a0, v0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64(
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsbc.vvm v8, v8, v16, v0
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vsbc.vxm v8, v8, a0, v0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64(
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
<vscale x 8 x i1> %2,
- i32 %3)
+ iXLen %3)
ret <vscale x 8 x i64> %a
}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
-
-define <vscale x 1 x half> @vfmerge_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv1f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x half> %va, <vscale x 1 x half> %vb
- ret <vscale x 1 x half> %vc
-}
-
-define <vscale x 1 x half> @vfmerge_fv_nxv1f16(<vscale x 1 x half> %va, half %b, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv1f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 1 x half> poison, half %b, i32 0
- %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
- %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x half> %splat, <vscale x 1 x half> %va
- ret <vscale x 1 x half> %vc
-}
-
-define <vscale x 2 x half> @vfmerge_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv2f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x half> %va, <vscale x 2 x half> %vb
- ret <vscale x 2 x half> %vc
-}
-
-define <vscale x 2 x half> @vfmerge_fv_nxv2f16(<vscale x 2 x half> %va, half %b, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv2f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 2 x half> poison, half %b, i32 0
- %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
- %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x half> %splat, <vscale x 2 x half> %va
- ret <vscale x 2 x half> %vc
-}
-
-define <vscale x 4 x half> @vfmerge_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv4f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x half> %va, <vscale x 4 x half> %vb
- ret <vscale x 4 x half> %vc
-}
-
-define <vscale x 4 x half> @vfmerge_fv_nxv4f16(<vscale x 4 x half> %va, half %b, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv4f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 4 x half> poison, half %b, i32 0
- %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
- %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x half> %splat, <vscale x 4 x half> %va
- ret <vscale x 4 x half> %vc
-}
-
-define <vscale x 8 x half> @vfmerge_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv8f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x half> %va, <vscale x 8 x half> %vb
- ret <vscale x 8 x half> %vc
-}
-
-define <vscale x 8 x half> @vfmerge_fv_nxv8f16(<vscale x 8 x half> %va, half %b, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv8f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
- %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
- %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x half> %splat, <vscale x 8 x half> %va
- ret <vscale x 8 x half> %vc
-}
-
-define <vscale x 8 x half> @vfmerge_zv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vfmerge_zv_nxv8f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 8 x half> poison, half zeroinitializer, i32 0
- %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
- %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x half> %splat, <vscale x 8 x half> %va
- ret <vscale x 8 x half> %vc
-}
-
-define <vscale x 8 x half> @vmerge_truelhs_nxv8f16_0(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
-; CHECK-LABEL: vmerge_truelhs_nxv8f16_0:
-; CHECK: # %bb.0:
-; CHECK-NEXT: ret
- %mhead = insertelement <vscale x 8 x i1> poison, i1 1, i32 0
- %mtrue = shufflevector <vscale x 8 x i1> %mhead, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
- %vc = select <vscale x 8 x i1> %mtrue, <vscale x 8 x half> %va, <vscale x 8 x half> %vb
- ret <vscale x 8 x half> %vc
-}
-
-define <vscale x 8 x half> @vmerge_falselhs_nxv8f16_0(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
-; CHECK-LABEL: vmerge_falselhs_nxv8f16_0:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vmv2r.v v8, v10
-; CHECK-NEXT: ret
- %vc = select <vscale x 8 x i1> zeroinitializer, <vscale x 8 x half> %va, <vscale x 8 x half> %vb
- ret <vscale x 8 x half> %vc
-}
-
-define <vscale x 16 x half> @vfmerge_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv16f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x half> %va, <vscale x 16 x half> %vb
- ret <vscale x 16 x half> %vc
-}
-
-define <vscale x 16 x half> @vfmerge_fv_nxv16f16(<vscale x 16 x half> %va, half %b, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv16f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 16 x half> poison, half %b, i32 0
- %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
- %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x half> %splat, <vscale x 16 x half> %va
- ret <vscale x 16 x half> %vc
-}
-
-define <vscale x 32 x half> @vfmerge_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv32f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 32 x i1> %cond, <vscale x 32 x half> %va, <vscale x 32 x half> %vb
- ret <vscale x 32 x half> %vc
-}
-
-define <vscale x 32 x half> @vfmerge_fv_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv32f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 32 x half> poison, half %b, i32 0
- %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
- %vc = select <vscale x 32 x i1> %cond, <vscale x 32 x half> %splat, <vscale x 32 x half> %va
- ret <vscale x 32 x half> %vc
-}
-
-define <vscale x 1 x float> @vfmerge_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv1f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x float> %va, <vscale x 1 x float> %vb
- ret <vscale x 1 x float> %vc
-}
-
-define <vscale x 1 x float> @vfmerge_fv_nxv1f32(<vscale x 1 x float> %va, float %b, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv1f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 1 x float> poison, float %b, i32 0
- %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
- %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x float> %splat, <vscale x 1 x float> %va
- ret <vscale x 1 x float> %vc
-}
-
-define <vscale x 2 x float> @vfmerge_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv2f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x float> %va, <vscale x 2 x float> %vb
- ret <vscale x 2 x float> %vc
-}
-
-define <vscale x 2 x float> @vfmerge_fv_nxv2f32(<vscale x 2 x float> %va, float %b, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv2f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 2 x float> poison, float %b, i32 0
- %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
- %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x float> %splat, <vscale x 2 x float> %va
- ret <vscale x 2 x float> %vc
-}
-
-define <vscale x 4 x float> @vfmerge_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv4f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x float> %va, <vscale x 4 x float> %vb
- ret <vscale x 4 x float> %vc
-}
-
-define <vscale x 4 x float> @vfmerge_fv_nxv4f32(<vscale x 4 x float> %va, float %b, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv4f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 4 x float> poison, float %b, i32 0
- %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
- %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x float> %splat, <vscale x 4 x float> %va
- ret <vscale x 4 x float> %vc
-}
-
-define <vscale x 8 x float> @vfmerge_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv8f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x float> %va, <vscale x 8 x float> %vb
- ret <vscale x 8 x float> %vc
-}
-
-define <vscale x 8 x float> @vfmerge_fv_nxv8f32(<vscale x 8 x float> %va, float %b, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv8f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
- %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
- %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x float> %splat, <vscale x 8 x float> %va
- ret <vscale x 8 x float> %vc
-}
-
-define <vscale x 8 x float> @vfmerge_zv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vfmerge_zv_nxv8f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 8 x float> poison, float zeroinitializer, i32 0
- %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
- %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x float> %splat, <vscale x 8 x float> %va
- ret <vscale x 8 x float> %vc
-}
-
-define <vscale x 16 x float> @vfmerge_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv16f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x float> %va, <vscale x 16 x float> %vb
- ret <vscale x 16 x float> %vc
-}
-
-define <vscale x 16 x float> @vfmerge_fv_nxv16f32(<vscale x 16 x float> %va, float %b, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv16f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 16 x float> poison, float %b, i32 0
- %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
- %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x float> %splat, <vscale x 16 x float> %va
- ret <vscale x 16 x float> %vc
-}
-
-define <vscale x 1 x double> @vfmerge_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x double> %va, <vscale x 1 x double> %vb
- ret <vscale x 1 x double> %vc
-}
-
-define <vscale x 1 x double> @vfmerge_fv_nxv1f64(<vscale x 1 x double> %va, double %b, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 1 x double> poison, double %b, i32 0
- %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
- %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x double> %splat, <vscale x 1 x double> %va
- ret <vscale x 1 x double> %vc
-}
-
-define <vscale x 2 x double> @vfmerge_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x double> %va, <vscale x 2 x double> %vb
- ret <vscale x 2 x double> %vc
-}
-
-define <vscale x 2 x double> @vfmerge_fv_nxv2f64(<vscale x 2 x double> %va, double %b, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 2 x double> poison, double %b, i32 0
- %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
- %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x double> %splat, <vscale x 2 x double> %va
- ret <vscale x 2 x double> %vc
-}
-
-define <vscale x 4 x double> @vfmerge_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x double> %va, <vscale x 4 x double> %vb
- ret <vscale x 4 x double> %vc
-}
-
-define <vscale x 4 x double> @vfmerge_fv_nxv4f64(<vscale x 4 x double> %va, double %b, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 4 x double> poison, double %b, i32 0
- %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
- %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x double> %splat, <vscale x 4 x double> %va
- ret <vscale x 4 x double> %vc
-}
-
-define <vscale x 8 x double> @vfmerge_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x double> %va, <vscale x 8 x double> %vb
- ret <vscale x 8 x double> %vc
-}
-
-define <vscale x 8 x double> @vfmerge_fv_nxv8f64(<vscale x 8 x double> %va, double %b, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
- %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
- %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x double> %splat, <vscale x 8 x double> %va
- ret <vscale x 8 x double> %vc
-}
-
-define <vscale x 8 x double> @vfmerge_zv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vfmerge_zv_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 8 x double> poison, double zeroinitializer, i32 0
- %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
- %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x double> %splat, <vscale x 8 x double> %va
- ret <vscale x 8 x double> %vc
-}
-
-define <vscale x 16 x double> @vselect_combine_regression(<vscale x 16 x i64> %va, <vscale x 16 x double> %vb) {
-; CHECK-LABEL: vselect_combine_regression:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, a0, a1
-; CHECK-NEXT: vl8re64.v v8, (a1)
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vl8re64.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmseq.vi v24, v16, 0
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmseq.vi v0, v16, 0
-; CHECK-NEXT: vmv.v.i v16, 0
-; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
- %cond = icmp eq <vscale x 16 x i64> %va, zeroinitializer
- %sel = select <vscale x 16 x i1> %cond, <vscale x 16 x double> %vb, <vscale x 16 x double> zeroinitializer
- ret <vscale x 16 x double> %sel
-}
-
-define void @vselect_legalize_regression(<vscale x 16 x double> %a, <vscale x 16 x i1> %ma, <vscale x 16 x i1> %mb, <vscale x 16 x double>* %out) {
-; CHECK-LABEL: vselect_legalize_regression:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, ma
-; CHECK-NEXT: vlm.v v24, (a0)
-; CHECK-NEXT: vmand.mm v1, v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: srli a2, a0, 3
-; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v1, a2
-; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv.v.i v24, 0
-; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v1
-; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0
-; CHECK-NEXT: vs8r.v v8, (a1)
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, a1, a0
-; CHECK-NEXT: vs8r.v v16, (a0)
-; CHECK-NEXT: ret
- %cond = and <vscale x 16 x i1> %ma, %mb
- %sel = select <vscale x 16 x i1> %cond, <vscale x 16 x double> %a, <vscale x 16 x double> zeroinitializer
- store <vscale x 16 x double> %sel, <vscale x 16 x double>* %out
- ret void
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
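+; Both RUN lines share the bare CHECK prefix, so every assertion in this file
+; must match the riscv32 and riscv64 output alike; floating-point merge and
+; select lowering is expected to be identical under ilp32d and lp64d here.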
define <vscale x 1 x half> @vfmerge_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %cond) {
; CHECK-LABEL: vfmerge_vv_nxv1f16:
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
-
-define <vscale x 1 x i8> @vmerge_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv1i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i8> %va, <vscale x 1 x i8> %vb
- ret <vscale x 1 x i8> %vc
-}
-
-define <vscale x 1 x i8> @vmerge_xv_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv1i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
- %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
- %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i8> %splat, <vscale x 1 x i8> %va
- ret <vscale x 1 x i8> %vc
-}
-
-define <vscale x 1 x i8> @vmerge_iv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv1i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 1 x i8> poison, i8 3, i32 0
- %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
- %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i8> %splat, <vscale x 1 x i8> %va
- ret <vscale x 1 x i8> %vc
-}
-
-define <vscale x 2 x i8> @vmerge_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv2i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i8> %va, <vscale x 2 x i8> %vb
- ret <vscale x 2 x i8> %vc
-}
-
-define <vscale x 2 x i8> @vmerge_xv_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv2i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
- %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
- %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i8> %splat, <vscale x 2 x i8> %va
- ret <vscale x 2 x i8> %vc
-}
-
-define <vscale x 2 x i8> @vmerge_iv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv2i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 2 x i8> poison, i8 3, i32 0
- %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
- %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i8> %splat, <vscale x 2 x i8> %va
- ret <vscale x 2 x i8> %vc
-}
-
-define <vscale x 3 x i8> @vmerge_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv3i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 3 x i1> %cond, <vscale x 3 x i8> %va, <vscale x 3 x i8> %vb
- ret <vscale x 3 x i8> %vc
-}
-
-define <vscale x 3 x i8> @vmerge_xv_nxv3i8(<vscale x 3 x i8> %va, i8 signext %b, <vscale x 3 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv3i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
- %splat = shufflevector <vscale x 3 x i8> %head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
- %vc = select <vscale x 3 x i1> %cond, <vscale x 3 x i8> %splat, <vscale x 3 x i8> %va
- ret <vscale x 3 x i8> %vc
-}
-
-define <vscale x 3 x i8> @vmerge_iv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv3i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 3 x i8> poison, i8 3, i32 0
- %splat = shufflevector <vscale x 3 x i8> %head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
- %vc = select <vscale x 3 x i1> %cond, <vscale x 3 x i8> %splat, <vscale x 3 x i8> %va
- ret <vscale x 3 x i8> %vc
-}
-
-define <vscale x 4 x i8> @vmerge_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv4i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i8> %va, <vscale x 4 x i8> %vb
- ret <vscale x 4 x i8> %vc
-}
-
-define <vscale x 4 x i8> @vmerge_xv_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv4i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
- %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
- %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i8> %splat, <vscale x 4 x i8> %va
- ret <vscale x 4 x i8> %vc
-}
-
-define <vscale x 4 x i8> @vmerge_iv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv4i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 4 x i8> poison, i8 3, i32 0
- %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
- %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i8> %splat, <vscale x 4 x i8> %va
- ret <vscale x 4 x i8> %vc
-}
-
-define <vscale x 8 x i8> @vmerge_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv8i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i8> %va, <vscale x 8 x i8> %vb
- ret <vscale x 8 x i8> %vc
-}
-
-define <vscale x 8 x i8> @vmerge_xv_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv8i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
- %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
- %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i8> %splat, <vscale x 8 x i8> %va
- ret <vscale x 8 x i8> %vc
-}
-
-define <vscale x 8 x i8> @vmerge_iv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv8i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 8 x i8> poison, i8 3, i32 0
- %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
- %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i8> %splat, <vscale x 8 x i8> %va
- ret <vscale x 8 x i8> %vc
-}
-
-define <vscale x 16 x i8> @vmerge_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv16i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i8> %va, <vscale x 16 x i8> %vb
- ret <vscale x 16 x i8> %vc
-}
-
-define <vscale x 16 x i8> @vmerge_xv_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv16i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
- %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
- %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i8> %splat, <vscale x 16 x i8> %va
- ret <vscale x 16 x i8> %vc
-}
-
-define <vscale x 16 x i8> @vmerge_iv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv16i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 16 x i8> poison, i8 3, i32 0
- %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
- %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i8> %splat, <vscale x 16 x i8> %va
- ret <vscale x 16 x i8> %vc
-}
-
-define <vscale x 32 x i8> @vmerge_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv32i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 32 x i1> %cond, <vscale x 32 x i8> %va, <vscale x 32 x i8> %vb
- ret <vscale x 32 x i8> %vc
-}
-
-define <vscale x 32 x i8> @vmerge_xv_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b, <vscale x 32 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv32i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
- %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
- %vc = select <vscale x 32 x i1> %cond, <vscale x 32 x i8> %splat, <vscale x 32 x i8> %va
- ret <vscale x 32 x i8> %vc
-}
-
-define <vscale x 32 x i8> @vmerge_iv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv32i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 32 x i8> poison, i8 3, i32 0
- %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
- %vc = select <vscale x 32 x i1> %cond, <vscale x 32 x i8> %splat, <vscale x 32 x i8> %va
- ret <vscale x 32 x i8> %vc
-}
-
-define <vscale x 64 x i8> @vmerge_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv64i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 64 x i1> %cond, <vscale x 64 x i8> %va, <vscale x 64 x i8> %vb
- ret <vscale x 64 x i8> %vc
-}
-
-define <vscale x 64 x i8> @vmerge_xv_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b, <vscale x 64 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv64i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
- %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
- %vc = select <vscale x 64 x i1> %cond, <vscale x 64 x i8> %splat, <vscale x 64 x i8> %va
- ret <vscale x 64 x i8> %vc
-}
-
-define <vscale x 64 x i8> @vmerge_iv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv64i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 64 x i8> poison, i8 3, i32 0
- %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
- %vc = select <vscale x 64 x i1> %cond, <vscale x 64 x i8> %splat, <vscale x 64 x i8> %va
- ret <vscale x 64 x i8> %vc
-}
-
-define <vscale x 1 x i16> @vmerge_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv1i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i16> %va, <vscale x 1 x i16> %vb
- ret <vscale x 1 x i16> %vc
-}
-
-define <vscale x 1 x i16> @vmerge_xv_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv1i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
- %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
- %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i16> %splat, <vscale x 1 x i16> %va
- ret <vscale x 1 x i16> %vc
-}
-
-define <vscale x 1 x i16> @vmerge_iv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv1i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 1 x i16> poison, i16 3, i32 0
- %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
- %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i16> %splat, <vscale x 1 x i16> %va
- ret <vscale x 1 x i16> %vc
-}
-
-define <vscale x 2 x i16> @vmerge_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv2i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i16> %va, <vscale x 2 x i16> %vb
- ret <vscale x 2 x i16> %vc
-}
-
-define <vscale x 2 x i16> @vmerge_xv_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv2i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
- %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
- %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i16> %splat, <vscale x 2 x i16> %va
- ret <vscale x 2 x i16> %vc
-}
-
-define <vscale x 2 x i16> @vmerge_iv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv2i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 2 x i16> poison, i16 3, i32 0
- %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
- %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i16> %splat, <vscale x 2 x i16> %va
- ret <vscale x 2 x i16> %vc
-}
-
-define <vscale x 4 x i16> @vmerge_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv4i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i16> %va, <vscale x 4 x i16> %vb
- ret <vscale x 4 x i16> %vc
-}
-
-define <vscale x 4 x i16> @vmerge_xv_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv4i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
- %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
- %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i16> %splat, <vscale x 4 x i16> %va
- ret <vscale x 4 x i16> %vc
-}
-
-define <vscale x 4 x i16> @vmerge_iv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv4i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 4 x i16> poison, i16 3, i32 0
- %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
- %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i16> %splat, <vscale x 4 x i16> %va
- ret <vscale x 4 x i16> %vc
-}
-
-define <vscale x 8 x i16> @vmerge_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv8i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i16> %va, <vscale x 8 x i16> %vb
- ret <vscale x 8 x i16> %vc
-}
-
-define <vscale x 8 x i16> @vmerge_xv_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv8i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
- %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
- %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i16> %splat, <vscale x 8 x i16> %va
- ret <vscale x 8 x i16> %vc
-}
-
-define <vscale x 8 x i16> @vmerge_iv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv8i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 8 x i16> poison, i16 3, i32 0
- %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
- %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i16> %splat, <vscale x 8 x i16> %va
- ret <vscale x 8 x i16> %vc
-}
-
-define <vscale x 16 x i16> @vmerge_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv16i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i16> %va, <vscale x 16 x i16> %vb
- ret <vscale x 16 x i16> %vc
-}
-
-define <vscale x 16 x i16> @vmerge_xv_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv16i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
- %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
- %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i16> %splat, <vscale x 16 x i16> %va
- ret <vscale x 16 x i16> %vc
-}
-
-define <vscale x 16 x i16> @vmerge_iv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv16i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 16 x i16> poison, i16 3, i32 0
- %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
- %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i16> %splat, <vscale x 16 x i16> %va
- ret <vscale x 16 x i16> %vc
-}
-
-define <vscale x 32 x i16> @vmerge_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv32i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 32 x i1> %cond, <vscale x 32 x i16> %va, <vscale x 32 x i16> %vb
- ret <vscale x 32 x i16> %vc
-}
-
-define <vscale x 32 x i16> @vmerge_xv_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b, <vscale x 32 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv32i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
- %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
- %vc = select <vscale x 32 x i1> %cond, <vscale x 32 x i16> %splat, <vscale x 32 x i16> %va
- ret <vscale x 32 x i16> %vc
-}
-
-define <vscale x 32 x i16> @vmerge_iv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv32i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 32 x i16> poison, i16 3, i32 0
- %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
- %vc = select <vscale x 32 x i1> %cond, <vscale x 32 x i16> %splat, <vscale x 32 x i16> %va
- ret <vscale x 32 x i16> %vc
-}
-
-define <vscale x 1 x i32> @vmerge_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv1i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i32> %va, <vscale x 1 x i32> %vb
- ret <vscale x 1 x i32> %vc
-}
-
-define <vscale x 1 x i32> @vmerge_xv_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv1i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
- %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
- %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i32> %splat, <vscale x 1 x i32> %va
- ret <vscale x 1 x i32> %vc
-}
-
-define <vscale x 1 x i32> @vmerge_iv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv1i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 1 x i32> poison, i32 3, i32 0
- %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
- %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i32> %splat, <vscale x 1 x i32> %va
- ret <vscale x 1 x i32> %vc
-}
-
-define <vscale x 2 x i32> @vmerge_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv2i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i32> %va, <vscale x 2 x i32> %vb
- ret <vscale x 2 x i32> %vc
-}
-
-define <vscale x 2 x i32> @vmerge_xv_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv2i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
- %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
- %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i32> %splat, <vscale x 2 x i32> %va
- ret <vscale x 2 x i32> %vc
-}
-
-define <vscale x 2 x i32> @vmerge_iv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv2i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 2 x i32> poison, i32 3, i32 0
- %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
- %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i32> %splat, <vscale x 2 x i32> %va
- ret <vscale x 2 x i32> %vc
-}
-
-define <vscale x 4 x i32> @vmerge_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv4i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i32> %va, <vscale x 4 x i32> %vb
- ret <vscale x 4 x i32> %vc
-}
-
-define <vscale x 4 x i32> @vmerge_xv_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv4i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
- %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
- %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i32> %splat, <vscale x 4 x i32> %va
- ret <vscale x 4 x i32> %vc
-}
-
-define <vscale x 4 x i32> @vmerge_iv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv4i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
- %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
- %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i32> %splat, <vscale x 4 x i32> %va
- ret <vscale x 4 x i32> %vc
-}
-
-define <vscale x 8 x i32> @vmerge_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv8i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i32> %va, <vscale x 8 x i32> %vb
- ret <vscale x 8 x i32> %vc
-}
-
-define <vscale x 8 x i32> @vmerge_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv8i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
- %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
- %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i32> %splat, <vscale x 8 x i32> %va
- ret <vscale x 8 x i32> %vc
-}
-
-define <vscale x 8 x i32> @vmerge_iv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv8i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 8 x i32> poison, i32 3, i32 0
- %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
- %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i32> %splat, <vscale x 8 x i32> %va
- ret <vscale x 8 x i32> %vc
-}
-
-define <vscale x 16 x i32> @vmerge_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv16i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i32> %va, <vscale x 16 x i32> %vb
- ret <vscale x 16 x i32> %vc
-}
-
-define <vscale x 16 x i32> @vmerge_xv_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv16i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
- %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
- %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i32> %splat, <vscale x 16 x i32> %va
- ret <vscale x 16 x i32> %vc
-}
-
-define <vscale x 16 x i32> @vmerge_iv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv16i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 16 x i32> poison, i32 3, i32 0
- %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
- %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i32> %splat, <vscale x 16 x i32> %va
- ret <vscale x 16 x i32> %vc
-}
-
-define <vscale x 1 x i64> @vmerge_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv1i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i64> %va, <vscale x 1 x i64> %vb
- ret <vscale x 1 x i64> %vc
-}
-
-define <vscale x 1 x i64> @vmerge_xv_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv1i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v8, (a0), zero, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
- %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
- %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i64> %splat, <vscale x 1 x i64> %va
- ret <vscale x 1 x i64> %vc
-}
-
-define <vscale x 1 x i64> @vmerge_iv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv1i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 1 x i64> poison, i64 3, i32 0
- %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
- %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i64> %splat, <vscale x 1 x i64> %va
- ret <vscale x 1 x i64> %vc
-}
-
-define <vscale x 2 x i64> @vmerge_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv2i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i64> %va, <vscale x 2 x i64> %vb
- ret <vscale x 2 x i64> %vc
-}
-
-define <vscale x 2 x i64> @vmerge_xv_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv2i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v8, (a0), zero, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
- %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
- %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i64> %splat, <vscale x 2 x i64> %va
- ret <vscale x 2 x i64> %vc
-}
-
-define <vscale x 2 x i64> @vmerge_iv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv2i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 2 x i64> poison, i64 3, i32 0
- %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
- %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i64> %splat, <vscale x 2 x i64> %va
- ret <vscale x 2 x i64> %vc
-}
-
-define <vscale x 4 x i64> @vmerge_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv4i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i64> %va, <vscale x 4 x i64> %vb
- ret <vscale x 4 x i64> %vc
-}
-
-define <vscale x 4 x i64> @vmerge_xv_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv4i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v8, (a0), zero, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
- %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
- %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i64> %splat, <vscale x 4 x i64> %va
- ret <vscale x 4 x i64> %vc
-}
-
-define <vscale x 4 x i64> @vmerge_iv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv4i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 4 x i64> poison, i64 3, i32 0
- %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
- %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i64> %splat, <vscale x 4 x i64> %va
- ret <vscale x 4 x i64> %vc
-}
-
-define <vscale x 8 x i64> @vmerge_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv8i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT: ret
- %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i64> %va, <vscale x 8 x i64> %vb
- ret <vscale x 8 x i64> %vc
-}
-
-define <vscale x 8 x i64> @vmerge_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv8i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
-; CHECK-NEXT: vlse64.v v8, (a0), zero, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
- %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
- %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i64> %splat, <vscale x 8 x i64> %va
- ret <vscale x 8 x i64> %vc
-}
-
-define <vscale x 8 x i64> @vmerge_iv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv8i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT: ret
- %head = insertelement <vscale x 8 x i64> poison, i64 3, i32 0
- %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
- %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i64> %splat, <vscale x 8 x i64> %va
- ret <vscale x 8 x i64> %vc
-}
-
-define <vscale x 8 x i64> @vmerge_truelhs_nxv8i64_0(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
-; CHECK-LABEL: vmerge_truelhs_nxv8i64_0:
-; CHECK: # %bb.0:
-; CHECK-NEXT: ret
- %mhead = insertelement <vscale x 8 x i1> poison, i1 1, i32 0
- %mtrue = shufflevector <vscale x 8 x i1> %mhead, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
- %vc = select <vscale x 8 x i1> %mtrue, <vscale x 8 x i64> %va, <vscale x 8 x i64> %vb
- ret <vscale x 8 x i64> %vc
-}
-
-define <vscale x 8 x i64> @vmerge_falselhs_nxv8i64_0(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
-; CHECK-LABEL: vmerge_falselhs_nxv8i64_0:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vmv8r.v v8, v16
-; CHECK-NEXT: ret
- %vc = select <vscale x 8 x i1> zeroinitializer, <vscale x 8 x i64> %va, <vscale x 8 x i64> %vb
- ret <vscale x 8 x i64> %vc
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
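+;
+; Most of these tests codegen identically on RV32 and RV64 and share the
+; common CHECK prefix. Only the i64 vmerge_xv tests diverge: RV32 has no
+; 64-bit GPR, so the scalar is stored to the stack and a masked zero-stride
+; vlse64.v broadcasts it into the active lanes while the mask-undisturbed
+; (mu) policy keeps %va in the inactive lanes, fusing the splat and the
+; merge; RV64 selects vmerge.vxm on the scalar directly.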
define <vscale x 1 x i8> @vmerge_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %cond) {
; CHECK-LABEL: vmerge_vv_nxv1i8:
}
define <vscale x 1 x i64> @vmerge_xv_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv1i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
+; RV32-LABEL: vmerge_xv_nxv1i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v8, (a0), zero, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vmerge_xv_nxv1i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV64-NEXT: vmerge.vxm v8, v8, a0, v0
+; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
%vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i64> %splat, <vscale x 1 x i64> %va
}
define <vscale x 2 x i64> @vmerge_xv_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv2i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
+; RV32-LABEL: vmerge_xv_nxv2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v8, (a0), zero, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vmerge_xv_nxv2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma
+; RV64-NEXT: vmerge.vxm v8, v8, a0, v0
+; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
%vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i64> %splat, <vscale x 2 x i64> %va
}
define <vscale x 4 x i64> @vmerge_xv_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv4i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
+; RV32-LABEL: vmerge_xv_nxv4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v8, (a0), zero, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vmerge_xv_nxv4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma
+; RV64-NEXT: vmerge.vxm v8, v8, a0, v0
+; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i64> %splat, <vscale x 4 x i64> %va
}
define <vscale x 8 x i64> @vmerge_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv8i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT: ret
+; RV32-LABEL: vmerge_xv_nxv8i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
+; RV32-NEXT: vlse64.v v8, (a0), zero, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vmerge_xv_nxv8i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV64-NEXT: vmerge.vxm v8, v8, a0, v0
+; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
%vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i64> %splat, <vscale x 8 x i64> %va
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vxor.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vxor.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vxor.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vxor.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vxor.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vxor.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vxor.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i64 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i64,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8r.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vxor.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vxor.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vxor.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vxor.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vxor.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vxor.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vxor.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vxor.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vxor.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vxor.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vxor.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vxor.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vxor.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vxor.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vxor.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vxor_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vxor_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vxor_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vxor_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vxor_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vxor.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vxor_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vxor.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vxor_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- <vscale x 64 x i1>,
- i64,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vxor.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i8 %2,
- <vscale x 64 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vxor_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vxor_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vxor_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vxor_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vxor.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vxor_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vxor.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vxor_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vxor.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i16 %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vxor_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vxor_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vxor_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vxor.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vxor_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vxor.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vxor_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vxor.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vxor_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vxor_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vxor.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vxor_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vxor.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vxor_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vxor.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vxor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vxor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i8 9,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vxor_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vxor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i8 9,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vxor_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vxor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i8 9,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vxor_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vxor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i8 9,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vxor_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vxor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i8 9,
- <vscale x 16 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vxor_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vxor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i8 9,
- <vscale x 32 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vxor_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vxor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT: vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i8 9,
- <vscale x 64 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vxor_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vxor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i16 9,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vxor_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vxor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i16 9,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vxor_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vxor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i16 9,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vxor_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vxor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i16 9,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vxor_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vxor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i16 9,
- <vscale x 16 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vxor_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vxor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT: vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i16 9,
- <vscale x 32 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vxor_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vxor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 9,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vxor_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vxor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 9,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vxor_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vxor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 9,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vxor_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vxor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 9,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vxor_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vxor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT: vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 9,
- <vscale x 16 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vxor_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vxor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 9,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vxor_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vxor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 9,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vxor_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vxor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 9,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vxor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vxor.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 9,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
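+; Note: iXLen below is a textual placeholder, not an LLVM type. The sed
+; invocations above rewrite it to i32 for the riscv32 run and to i64 for the
+; riscv64 run, so a single test body covers both XLEN widths. Checks common
+; to both targets use the shared CHECK prefix; the RV32/RV64 prefixes capture
+; output that diverges between targets, e.g. the nxv1i64 vx case below, where
+; riscv32 must splat an i64 scalar operand through the stack with vlse64.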
declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i32);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i32);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i32);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i32);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i32);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i32);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
- i32);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
<vscale x 64 x i8> %1,
<vscale x 64 x i8> %2,
<vscale x 64 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i32);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i32);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i32);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i32);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i32);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
- i32);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re16.v v24, (a0)
<vscale x 32 x i16> %1,
<vscale x 32 x i16> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i32);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i32);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i32);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i32);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
- i32);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re32.v v24, (a0)
<vscale x 16 x i32> %1,
<vscale x 16 x i32> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i32);
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i64> @intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i32);
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i64> @intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i32);
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i64> @intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
- i32);
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i64> @intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vxor_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vxor_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i8> @intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vxor_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vxor_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i8> @intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vxor_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vxor_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i8> @intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vxor_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vxor_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i8> @intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vxor_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vxor_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i8> @intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vxor_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vxor_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i8> @intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
- i32);
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vxor_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vxor_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
i8,
<vscale x 64 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 64 x i8> @intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
<vscale x 64 x i8> %1,
i8 %2,
<vscale x 64 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vxor_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vxor_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i16> @intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vxor_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vxor_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i16> @intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vxor_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vxor_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i16> @intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vxor_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vxor_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i16> @intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vxor_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vxor_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i16> @intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
- i32);
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vxor_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vxor_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
i16,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 32 x i16> @intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
<vscale x 32 x i16> %1,
i16 %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vxor_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vxor_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 1 x i32> @intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vxor_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vxor_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 2 x i32> @intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vxor_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vxor_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 4 x i32> @intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vxor_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vxor_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 8 x i32> @intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
- i32);
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vxor_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vxor_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
i32,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen);
-define <vscale x 16 x i32> @intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
<vscale x 16 x i32> %1,
i32 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vxor_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vxor.vv v8, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vxor_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vxor_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vxor.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vxor_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vxor.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vxor.vv v8, v9, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vxor.vx v8, v9, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
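
The prefix split in the two hunks above is forced by the calling convention rather than by the intrinsic itself: on riscv32 an i64 scalar arrives in a GPR pair, so the expected sequence stores both halves to the stack and splats them with a zero-strided vlse64.v before a vector-vector vxor.vv, while riscv64 keeps the scalar in a single register and folds it straight into vxor.vx. The same shape repeats for the m2/m4/m8 cases that follow, and the vi forms later on stay under a single CHECK prefix because the 5-bit immediate encodes identically on both targets.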
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vxor_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vxor.vv v8, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vxor_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vxor_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vxor.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vxor_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vxor.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vxor.vv v8, v10, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vxor.vx v8, v10, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vxor_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vxor.vv v8, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vxor_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vxor_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vxor.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vxor_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vxor.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vxor.vv v8, v12, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vxor.vx v8, v12, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vxor_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vxor.vv v8, v8, v16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vxor_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vxor_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vxor.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vxor_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vxor.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vlse64.v v24, (a0), zero
+; RV32-NEXT: vxor.vv v8, v16, v24, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vxor.vx v8, v16, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
-define <vscale x 1 x i8> @intrinsic_vxor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vxor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i8> %a
}
-define <vscale x 1 x i8> @intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
<vscale x 1 x i8> %1,
i8 9,
<vscale x 1 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 1 x i8> %a
}
-define <vscale x 2 x i8> @intrinsic_vxor_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vxor_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i8> %a
}
-define <vscale x 2 x i8> @intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
<vscale x 2 x i8> %1,
i8 9,
<vscale x 2 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 2 x i8> %a
}
-define <vscale x 4 x i8> @intrinsic_vxor_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_vxor_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i8> %a
}
-define <vscale x 4 x i8> @intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
<vscale x 4 x i8> %1,
i8 9,
<vscale x 4 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 4 x i8> %a
}
-define <vscale x 8 x i8> @intrinsic_vxor_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_vxor_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i8> %a
}
-define <vscale x 8 x i8> @intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
<vscale x 8 x i8> %1,
i8 9,
<vscale x 8 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 8 x i8> %a
}
-define <vscale x 16 x i8> @intrinsic_vxor_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_vxor_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i8> %a
}
-define <vscale x 16 x i8> @intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
<vscale x 16 x i8> %1,
i8 9,
<vscale x 16 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 16 x i8> %a
}
-define <vscale x 32 x i8> @intrinsic_vxor_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_vxor_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 32 x i8> %a
}
-define <vscale x 32 x i8> @intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
<vscale x 32 x i8> %1,
i8 9,
<vscale x 32 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 32 x i8> %a
}
-define <vscale x 64 x i8> @intrinsic_vxor_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
+define <vscale x 64 x i8> @intrinsic_vxor_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 64 x i8> %a
}
-define <vscale x 64 x i8> @intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
<vscale x 64 x i8> %1,
i8 9,
<vscale x 64 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 64 x i8> %a
}
-define <vscale x 1 x i16> @intrinsic_vxor_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vxor_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i16> %a
}
-define <vscale x 1 x i16> @intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
<vscale x 1 x i16> %1,
i16 9,
<vscale x 1 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 1 x i16> %a
}
-define <vscale x 2 x i16> @intrinsic_vxor_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vxor_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i16> %a
}
-define <vscale x 2 x i16> @intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
<vscale x 2 x i16> %1,
i16 9,
<vscale x 2 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 2 x i16> %a
}
-define <vscale x 4 x i16> @intrinsic_vxor_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vxor_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i16> %a
}
-define <vscale x 4 x i16> @intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
<vscale x 4 x i16> %1,
i16 9,
<vscale x 4 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 4 x i16> %a
}
-define <vscale x 8 x i16> @intrinsic_vxor_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vxor_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i16> %a
}
-define <vscale x 8 x i16> @intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
<vscale x 8 x i16> %1,
i16 9,
<vscale x 8 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 8 x i16> %a
}
-define <vscale x 16 x i16> @intrinsic_vxor_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vxor_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i16> %a
}
-define <vscale x 16 x i16> @intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
<vscale x 16 x i16> %1,
i16 9,
<vscale x 16 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 16 x i16> %a
}
-define <vscale x 32 x i16> @intrinsic_vxor_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
+define <vscale x 32 x i16> @intrinsic_vxor_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 32 x i16> %a
}
-define <vscale x 32 x i16> @intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
<vscale x 32 x i16> %1,
i16 9,
<vscale x 32 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 32 x i16> %a
}
-define <vscale x 1 x i32> @intrinsic_vxor_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vxor_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i32> %a
}
-define <vscale x 1 x i32> @intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
<vscale x 1 x i32> %1,
i32 9,
<vscale x 1 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 1 x i32> %a
}
-define <vscale x 2 x i32> @intrinsic_vxor_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vxor_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i32> %a
}
-define <vscale x 2 x i32> @intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 2 x i32> %a
}
-define <vscale x 4 x i32> @intrinsic_vxor_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vxor_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i32> %a
}
-define <vscale x 4 x i32> @intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
<vscale x 4 x i32> %1,
i32 9,
<vscale x 4 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 4 x i32> %a
}
-define <vscale x 8 x i32> @intrinsic_vxor_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vxor_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i32> %a
}
-define <vscale x 8 x i32> @intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
<vscale x 8 x i32> %1,
i32 9,
<vscale x 8 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 8 x i32> %a
}
-define <vscale x 16 x i32> @intrinsic_vxor_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vxor_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i32> %a
}
-define <vscale x 16 x i32> @intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
<vscale x 16 x i32> %1,
i32 9,
<vscale x 16 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 16 x i32> %a
}
-define <vscale x 1 x i64> @intrinsic_vxor_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vxor_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i64> %a
}
-define <vscale x 1 x i64> @intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 1 x i64> %a
}
-define <vscale x 2 x i64> @intrinsic_vxor_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vxor_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i64> %a
}
-define <vscale x 2 x i64> @intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 2 x i64> %a
}
-define <vscale x 4 x i64> @intrinsic_vxor_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vxor_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i64> %a
}
-define <vscale x 4 x i64> @intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 4 x i64> %a
}
-define <vscale x 8 x i64> @intrinsic_vxor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vxor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i64> %a
}
-define <vscale x 8 x i64> @intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
<vscale x 8 x i64> %1,
i64 9,
<vscale x 8 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 8 x i64> %a
}