; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamoadd_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamoaddei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamoaddei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamoadd_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamoaddei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamoaddei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamoadd_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamoaddei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamoaddei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamoadd_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamoaddei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamoaddei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoaddei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoaddei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoaddei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoaddei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoaddei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoaddei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoaddei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoaddei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
declare <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32(
<vscale x 1 x i32>*,
<vscale x 1 x i32>,
ret <vscale x 8 x i32> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32(
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
+declare <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vamoadd_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vamoaddei32.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> *%0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vamoadd_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vamoaddei32.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> *%0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoaddei32.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoaddei32.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoaddei32.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoaddei32.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoaddei32.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoaddei32.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
i32);
-define <vscale x 16 x i32> @intrinsic_vamoadd_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv16i32_nxv16i32:
+define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
; CHECK-NEXT: vamoaddei32.v v16, (a0), v8, v16
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32(
- <vscale x 16 x i32> *%0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %2,
i32 %3)
- ret <vscale x 16 x i32> %a
+ ret <vscale x 8 x i64> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
+declare <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
i32);
-define <vscale x 16 x i32> @intrinsic_vamoadd_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv16i32_nxv16i32:
+define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
; CHECK-NEXT: vamoaddei32.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> *%0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
i32 %4)
- ret <vscale x 16 x i32> %a
+ ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16(
ret <vscale x 16 x i32> %a
}
+declare <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoaddei16.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoaddei16.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoaddei16.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoaddei16.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoaddei16.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoaddei16.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoaddei16.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoaddei16.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
declare <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8(
<vscale x 1 x i32>*,
<vscale x 1 x i8>,
ret <vscale x 16 x i32> %a
}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoaddei8.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoaddei8.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoaddei8.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoaddei8.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoaddei8.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoaddei8.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoaddei8.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoaddei8.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64(
<vscale x 1 x i32>*,
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamoand_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamoandei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamoand_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamoandei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamoand_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamoandei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamoand_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamoandei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamoand_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamoandei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamoand_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamoandei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamoand_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamoandei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamoand_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamoandei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoand_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoandei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoand_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoandei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoand_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoandei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoand_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoandei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoand_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoandei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoand_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoandei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoand_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoandei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoand_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoandei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
declare <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i32(
<vscale x 1 x i32>*,
<vscale x 1 x i32>,
ret <vscale x 8 x i32> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32(
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
+declare <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vamoand_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vamoandei32.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> *%0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vamoand_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vamoandei32.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> *%0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoand_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoandei32.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoand_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoandei32.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoand_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoandei32.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoand_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoandei32.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoand_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoandei32.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoand_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoandei32.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
i32);
-define <vscale x 16 x i32> @intrinsic_vamoand_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv16i32_nxv16i32:
+define <vscale x 8 x i64> @intrinsic_vamoand_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
; CHECK-NEXT: vamoandei32.v v16, (a0), v8, v16
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32(
- <vscale x 16 x i32> *%0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %2,
i32 %3)
- ret <vscale x 16 x i32> %a
+ ret <vscale x 8 x i64> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
+declare <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
i32);
-define <vscale x 16 x i32> @intrinsic_vamoand_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv16i32_nxv16i32:
+define <vscale x 8 x i64> @intrinsic_vamoand_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
; CHECK-NEXT: vamoandei32.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> *%0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
i32 %4)
- ret <vscale x 16 x i32> %a
+ ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i16(
ret <vscale x 16 x i32> %a
}
+declare <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoand_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoandei16.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoand_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoandei16.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoand_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoandei16.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoand_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoandei16.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoand_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoandei16.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoand_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoandei16.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoand_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoandei16.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoand_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoandei16.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
declare <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i8(
<vscale x 1 x i32>*,
<vscale x 1 x i8>,
ret <vscale x 16 x i32> %a
}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoand_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoandei8.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoand_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoandei8.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoand_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoandei8.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoand_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoandei8.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoand_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoandei8.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoand_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoandei8.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoand_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoandei8.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoand_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoandei8.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64(
<vscale x 1 x i32>*,
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamomax_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamomaxei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamomax_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamomaxei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamomax_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamomaxei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamomax_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamomaxei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamomax_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamomaxei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamomax_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamomaxei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamomax_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamomaxei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamomax_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamomaxei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomax_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomax_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomax_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomax_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomax_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamomaxei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomax_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamomaxei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamomax_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamomaxei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamomax_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamomaxei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
declare <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i32(
<vscale x 1 x i32>*,
<vscale x 1 x i32>,
ret <vscale x 8 x i32> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i32(
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
+declare <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vamomax_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vamomaxei32.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> *%0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vamomax_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vamomaxei32.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> *%0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomax_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxei32.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomax_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxei32.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomax_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxei32.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomax_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxei32.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomax_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamomaxei32.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomax_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamomaxei32.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
i32);
-define <vscale x 16 x i32> @intrinsic_vamomax_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv16i32_nxv16i32:
+define <vscale x 8 x i64> @intrinsic_vamomax_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
; CHECK-NEXT: vamomaxei32.v v16, (a0), v8, v16
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i32(
- <vscale x 16 x i32> *%0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %2,
i32 %3)
- ret <vscale x 16 x i32> %a
+ ret <vscale x 8 x i64> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
+declare <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
i32);
-define <vscale x 16 x i32> @intrinsic_vamomax_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv16i32_nxv16i32:
+define <vscale x 8 x i64> @intrinsic_vamomax_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
; CHECK-NEXT: vamomaxei32.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> *%0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
i32 %4)
- ret <vscale x 16 x i32> %a
+ ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i16(
ret <vscale x 16 x i32> %a
}
+declare <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomax_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxei16.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomax_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxei16.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomax_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxei16.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomax_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxei16.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomax_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamomaxei16.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomax_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamomaxei16.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamomax_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamomaxei16.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamomax_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamomaxei16.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
declare <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i8(
<vscale x 1 x i32>*,
<vscale x 1 x i8>,
ret <vscale x 16 x i32> %a
}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomax_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxei8.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomax_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxei8.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomax_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxei8.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomax_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxei8.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomax_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamomaxei8.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomax_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamomaxei8.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamomax_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamomaxei8.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamomax_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamomaxei8.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i64(
<vscale x 1 x i32>*,
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> *%0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> *%0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i16(
<vscale x 1 x i32>*,
<vscale x 1 x i16>,
<vscale x 4 x i1> %3,
i32 %4)
- ret <vscale x 4 x i32> %a
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamomaxuei16.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamomaxuei16.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vamomaxuei16.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> *%0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vamomaxuei16.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> *%0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxuei16.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxuei16.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxuei16.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxuei16.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
}
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i16(
- <vscale x 8 x i32>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i32>,
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i64>,
i32);
-define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i16:
+define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
; CHECK-NEXT: vamomaxuei16.v v12, (a0), v8, v12
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i16(
- <vscale x 8 x i32> *%0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i32> %2,
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i64> %2,
i32 %3)
- ret <vscale x 8 x i32> %a
+ ret <vscale x 4 x i64> %a
}
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16(
- <vscale x 8 x i32>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
i32);
-define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i16:
+define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
; CHECK-NEXT: vamomaxuei16.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16(
- <vscale x 8 x i32> *%0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
i32 %4)
- ret <vscale x 8 x i32> %a
+ ret <vscale x 4 x i64> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i16(
- <vscale x 16 x i32>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i32>,
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i64>,
i32);
-define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i16:
+define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
; CHECK-NEXT: vamomaxuei16.v v16, (a0), v8, v16
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i16(
- <vscale x 16 x i32> *%0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i32> %2,
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i64> %2,
i32 %3)
- ret <vscale x 16 x i32> %a
+ ret <vscale x 8 x i64> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16(
- <vscale x 16 x i32>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
i32);
-define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i16:
+define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
; CHECK-NEXT: vamomaxuei16.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16(
- <vscale x 16 x i32> *%0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
i32 %4)
- ret <vscale x 16 x i32> %a
+ ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i8(
ret <vscale x 16 x i32> %a
}
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32(
- <vscale x 1 x i32>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i64>,
i32);
-define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i32:
+define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxuei8.v v9, (a0), v8, v9
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32(
- <vscale x 1 x i32> *%0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i64> %2,
i32 %3)
- ret <vscale x 1 x i32> %a
+ ret <vscale x 1 x i64> %a
}
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
-define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i32:
+define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxuei8.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> *%0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32(
- <vscale x 2 x i32>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9
-; CHECK-NEXT: vmv1r.v v8, v9
-; CHECK-NEXT: jalr zero, 0(ra)
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32(
- <vscale x 2 x i32> *%0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- i32 %3)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v9
-; CHECK-NEXT: jalr zero, 0(ra)
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> *%0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x i32> %a
+ ret <vscale x 1 x i64> %a
}
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32(
- <vscale x 4 x i32>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i64>,
i32);
-define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i32:
+define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v10, (a0), v8, v10
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxuei8.v v10, (a0), v8, v10
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32(
- <vscale x 4 x i32> *%0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i64> %2,
i32 %3)
- ret <vscale x 4 x i32> %a
+ ret <vscale x 2 x i64> %a
}
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
i32);
-define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i32:
+define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxuei8.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> *%0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
i32 %4)
- ret <vscale x 4 x i32> %a
+ ret <vscale x 2 x i64> %a
}
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32(
- <vscale x 8 x i32>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i64>,
i32);
-define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i32:
+define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v12, (a0), v8, v12
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamomaxuei8.v v12, (a0), v8, v12
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32(
- <vscale x 8 x i32> *%0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i64> %2,
i32 %3)
- ret <vscale x 8 x i32> %a
+ ret <vscale x 4 x i64> %a
}
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
i32);
-define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i32:
+define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamomaxuei8.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> *%0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
i32 %4)
- ret <vscale x 8 x i32> %a
+ ret <vscale x 4 x i64> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32(
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i64>,
i32);
-define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i32:
+define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v16, (a0), v8, v16
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamomaxuei8.v v16, (a0), v8, v16
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32(
- <vscale x 16 x i32> *%0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i64> %2,
i32 %3)
- ret <vscale x 16 x i32> %a
+ ret <vscale x 8 x i64> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
i32);
-define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i32:
+define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamomaxuei8.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> *%0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
i32 %4)
- ret <vscale x 16 x i32> %a
+ ret <vscale x 8 x i64> %a
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32(
+declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64(
<vscale x 1 x i32>*,
- <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
i64);
-define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i32:
+define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9
+; CHECK-NEXT: vamomaxuei64.v v9, (a0), v8, v9
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32(
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64(
<vscale x 1 x i32> *%0,
- <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %1,
<vscale x 1 x i32> %2,
i64 %3)
ret <vscale x 1 x i32> %a
}
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32(
+declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64(
<vscale x 1 x i32>*,
- <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i64);
-define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i32:
+define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vamomaxuei64.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32(
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64(
<vscale x 1 x i32> *%0,
- <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x i32> %a
}
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32(
+declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i64(
<vscale x 2 x i32>*,
- <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
i64);
-define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i32:
+define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9
-; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vamomaxuei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32(
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i64(
<vscale x 2 x i32> *%0,
- <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %1,
<vscale x 2 x i32> %2,
i64 %3)
ret <vscale x 2 x i32> %a
}
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32(
+declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64(
<vscale x 2 x i32>*,
- <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i64);
-define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i32:
+define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vamomaxuei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32(
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64(
<vscale x 2 x i32> *%0,
- <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x i32> %a
}
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32(
+declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i64(
<vscale x 4 x i32>*,
- <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
i64);
-define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i32:
+define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v10, (a0), v8, v10
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vamomaxuei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32(
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i64(
<vscale x 4 x i32> *%0,
- <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %1,
<vscale x 4 x i32> %2,
i64 %3)
ret <vscale x 4 x i32> %a
}
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32(
+declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64(
<vscale x 4 x i32>*,
- <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i64);
-define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i32:
+define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vamomaxuei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32(
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64(
<vscale x 4 x i32> *%0,
- <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x i32> %a
}
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32(
+declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i64(
<vscale x 8 x i32>*,
- <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
i64);
-define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i32:
+define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v12, (a0), v8, v12
-; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: vamomaxuei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32(
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i64(
<vscale x 8 x i32> *%0,
- <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %1,
<vscale x 8 x i32> %2,
i64 %3)
ret <vscale x 8 x i32> %a
}
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32(
+declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64(
<vscale x 8 x i32>*,
- <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i64);
-define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i32:
+define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: vamomaxuei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32(
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64(
<vscale x 8 x i32> *%0,
- <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x i32> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32(
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v16, (a0), v8, v16
-; CHECK-NEXT: vmv8r.v v8, v16
-; CHECK-NEXT: jalr zero, 0(ra)
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32(
- <vscale x 16 x i32> *%0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- i64 %3)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT: vmv8r.v v8, v16
-; CHECK-NEXT: jalr zero, 0(ra)
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> *%0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i32(
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i64(
<vscale x 1 x i64>*,
- <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
-define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i32:
+define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9
+; CHECK-NEXT: vamomaxuei64.v v9, (a0), v8, v9
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i32(
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i64(
<vscale x 1 x i64> *%0,
- <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i64 %3)
ret <vscale x 1 x i64> %a
}
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32(
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>*,
- <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i64);
-define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i32:
+define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vamomaxuei64.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32(
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> *%0,
- <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x i64> %a
}
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i32(
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i64(
<vscale x 2 x i64>*,
- <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
-define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i32:
+define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v10, (a0), v8, v10
+; CHECK-NEXT: vamomaxuei64.v v10, (a0), v8, v10
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i32(
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i64(
<vscale x 2 x i64> *%0,
- <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
i64 %3)
ret <vscale x 2 x i64> %a
}
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32(
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>*,
- <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i64);
-define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i32:
+define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vamomaxuei64.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32(
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> *%0,
- <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x i64> %a
}
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i32(
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i64(
<vscale x 4 x i64>*,
- <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
-define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i32:
+define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v12, (a0), v8, v12
+; CHECK-NEXT: vamomaxuei64.v v12, (a0), v8, v12
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i32(
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i64(
<vscale x 4 x i64> *%0,
- <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
i64 %3)
ret <vscale x 4 x i64> %a
}
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32(
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>*,
- <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i64);
-define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i32:
+define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vamomaxuei64.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32(
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> *%0,
- <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x i64> %a
}
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i32(
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i64(
<vscale x 8 x i64>*,
- <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
-define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i32:
+define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v16, (a0), v8, v16
+; CHECK-NEXT: vamomaxuei64.v v16, (a0), v8, v16
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i32(
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i64(
<vscale x 8 x i64> *%0,
- <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
i64 %3)
ret <vscale x 8 x i64> %a
}
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32(
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>*,
- <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i64);
-define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i32:
+define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vamomaxuei64.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32(
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> *%0,
- <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x i64> %a
}
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i16(
+declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32(
<vscale x 1 x i32>*,
- <vscale x 1 x i16>,
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
-define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i16:
+define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vamomaxuei16.v v9, (a0), v8, v9
+; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i16(
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32(
<vscale x 1 x i32> *%0,
- <vscale x 1 x i16> %1,
+ <vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
i64 %3)
ret <vscale x 1 x i32> %a
}
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16(
+declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32(
<vscale x 1 x i32>*,
- <vscale x 1 x i16>,
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i64);
-define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i16:
+define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vamomaxuei16.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16(
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32(
<vscale x 1 x i32> *%0,
- <vscale x 1 x i16> %1,
+ <vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x i32> %a
}
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i16(
+declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32(
<vscale x 2 x i32>*,
- <vscale x 2 x i16>,
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
-define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i16:
+define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vamomaxuei16.v v9, (a0), v8, v9
+; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i16(
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32(
<vscale x 2 x i32> *%0,
- <vscale x 2 x i16> %1,
+ <vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
i64 %3)
ret <vscale x 2 x i32> %a
}
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16(
+declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32>*,
- <vscale x 2 x i16>,
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i64);
-define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i16:
+define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vamomaxuei16.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16(
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32> *%0,
- <vscale x 2 x i16> %1,
+ <vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x i32> %a
}
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i16(
+declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32(
<vscale x 4 x i32>*,
- <vscale x 4 x i16>,
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
-define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i16:
+define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vamomaxuei16.v v10, (a0), v8, v10
+; CHECK-NEXT: vamomaxuei32.v v10, (a0), v8, v10
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i16(
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32(
<vscale x 4 x i32> *%0,
- <vscale x 4 x i16> %1,
+ <vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
i64 %3)
ret <vscale x 4 x i32> %a
}
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16(
+declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32(
<vscale x 4 x i32>*,
- <vscale x 4 x i16>,
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i64);
-define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i16:
+define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vamomaxuei16.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vamomaxuei32.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16(
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32(
<vscale x 4 x i32> *%0,
- <vscale x 4 x i16> %1,
+ <vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x i32> %a
}
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i16(
+declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32(
<vscale x 8 x i32>*,
- <vscale x 8 x i16>,
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
-define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i16:
+define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT: vamomaxuei16.v v12, (a0), v8, v12
+; CHECK-NEXT: vamomaxuei32.v v12, (a0), v8, v12
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i16(
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32(
<vscale x 8 x i32> *%0,
- <vscale x 8 x i16> %1,
+ <vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
i64 %3)
ret <vscale x 8 x i32> %a
}
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16(
+declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32(
<vscale x 8 x i32>*,
- <vscale x 8 x i16>,
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i64);
-define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i16:
+define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT: vamomaxuei16.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vamomaxuei32.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16(
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32(
<vscale x 8 x i32> *%0,
- <vscale x 8 x i16> %1,
+ <vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x i32> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i16(
+declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32(
<vscale x 16 x i32>*,
- <vscale x 16 x i16>,
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
-define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i16:
+define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
-; CHECK-NEXT: vamomaxuei16.v v16, (a0), v8, v16
+; CHECK-NEXT: vamomaxuei32.v v16, (a0), v8, v16
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i16(
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32(
<vscale x 16 x i32> *%0,
- <vscale x 16 x i16> %1,
+ <vscale x 16 x i32> %1,
<vscale x 16 x i32> %2,
i64 %3)
ret <vscale x 16 x i32> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16(
+declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32(
<vscale x 16 x i32>*,
- <vscale x 16 x i16>,
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
i64);
-define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i16:
+define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
-; CHECK-NEXT: vamomaxuei16.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vamomaxuei32.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16(
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32(
<vscale x 16 x i32> *%0,
- <vscale x 16 x i16> %1,
+ <vscale x 16 x i32> %1,
<vscale x 16 x i32> %2,
<vscale x 16 x i1> %3,
i64 %4)
ret <vscale x 16 x i32> %a
}
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i16(
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i32(
<vscale x 1 x i64>*,
- <vscale x 1 x i16>,
+ <vscale x 1 x i32>,
<vscale x 1 x i64>,
i64);
-define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i16:
+define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vamomaxuei16.v v9, (a0), v8, v9
+; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i16(
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i32(
<vscale x 1 x i64> *%0,
- <vscale x 1 x i16> %1,
+ <vscale x 1 x i32> %1,
<vscale x 1 x i64> %2,
i64 %3)
ret <vscale x 1 x i64> %a
}
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16(
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32(
<vscale x 1 x i64>*,
- <vscale x 1 x i16>,
+ <vscale x 1 x i32>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i64);
-define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i16:
+define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vamomaxuei16.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16(
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32(
<vscale x 1 x i64> *%0,
- <vscale x 1 x i16> %1,
+ <vscale x 1 x i32> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x i64> %a
}
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i16(
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i32(
<vscale x 2 x i64>*,
- <vscale x 2 x i16>,
+ <vscale x 2 x i32>,
<vscale x 2 x i64>,
i64);
-define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i16:
+define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT: vamomaxuei16.v v10, (a0), v8, v10
+; CHECK-NEXT: vamomaxuei32.v v10, (a0), v8, v10
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i16(
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i32(
<vscale x 2 x i64> *%0,
- <vscale x 2 x i16> %1,
+ <vscale x 2 x i32> %1,
<vscale x 2 x i64> %2,
i64 %3)
ret <vscale x 2 x i64> %a
}
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16(
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32(
<vscale x 2 x i64>*,
- <vscale x 2 x i16>,
+ <vscale x 2 x i32>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i64);
-define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i16:
+define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT: vamomaxuei16.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vamomaxuei32.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16(
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32(
<vscale x 2 x i64> *%0,
- <vscale x 2 x i16> %1,
+ <vscale x 2 x i32> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x i64> %a
}
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i16(
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i32(
<vscale x 4 x i64>*,
- <vscale x 4 x i16>,
+ <vscale x 4 x i32>,
<vscale x 4 x i64>,
i64);
-define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i16:
+define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT: vamomaxuei16.v v12, (a0), v8, v12
+; CHECK-NEXT: vamomaxuei32.v v12, (a0), v8, v12
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i16(
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i32(
<vscale x 4 x i64> *%0,
- <vscale x 4 x i16> %1,
+ <vscale x 4 x i32> %1,
<vscale x 4 x i64> %2,
i64 %3)
ret <vscale x 4 x i64> %a
}
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16(
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32(
<vscale x 4 x i64>*,
- <vscale x 4 x i16>,
+ <vscale x 4 x i32>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i64);
-define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i16:
+define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT: vamomaxuei16.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vamomaxuei32.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16(
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32(
<vscale x 4 x i64> *%0,
- <vscale x 4 x i16> %1,
+ <vscale x 4 x i32> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x i64> %a
}
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i16(
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i32(
<vscale x 8 x i64>*,
- <vscale x 8 x i16>,
+ <vscale x 8 x i32>,
<vscale x 8 x i64>,
i64);
-define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i16:
+define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
-; CHECK-NEXT: vamomaxuei16.v v16, (a0), v8, v16
+; CHECK-NEXT: vamomaxuei32.v v16, (a0), v8, v16
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i16(
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i32(
<vscale x 8 x i64> *%0,
- <vscale x 8 x i16> %1,
+ <vscale x 8 x i32> %1,
<vscale x 8 x i64> %2,
i64 %3)
ret <vscale x 8 x i64> %a
}
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16(
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32(
<vscale x 8 x i64>*,
- <vscale x 8 x i16>,
+ <vscale x 8 x i32>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i64);
-define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i16:
+define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
-; CHECK-NEXT: vamomaxuei16.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vamomaxuei32.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16(
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32(
<vscale x 8 x i64> *%0,
- <vscale x 8 x i16> %1,
+ <vscale x 8 x i32> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x i64> %a
}
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i8(
+declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i16(
<vscale x 1 x i32>*,
- <vscale x 1 x i8>,
+ <vscale x 1 x i16>,
<vscale x 1 x i32>,
i64);
-define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i8:
+define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vamomaxuei8.v v9, (a0), v8, v9
+; CHECK-NEXT: vamomaxuei16.v v9, (a0), v8, v9
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i8(
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i16(
<vscale x 1 x i32> *%0,
- <vscale x 1 x i8> %1,
+ <vscale x 1 x i16> %1,
<vscale x 1 x i32> %2,
i64 %3)
ret <vscale x 1 x i32> %a
}
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8(
+declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16(
<vscale x 1 x i32>*,
- <vscale x 1 x i8>,
+ <vscale x 1 x i16>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i64);
-define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i8:
+define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vamomaxuei8.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vamomaxuei16.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8(
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16(
<vscale x 1 x i32> *%0,
- <vscale x 1 x i8> %1,
+ <vscale x 1 x i16> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x i32> %a
}
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i8(
+declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i16(
<vscale x 2 x i32>*,
- <vscale x 2 x i8>,
+ <vscale x 2 x i16>,
<vscale x 2 x i32>,
i64);
-define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i8:
+define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vamomaxuei8.v v9, (a0), v8, v9
+; CHECK-NEXT: vamomaxuei16.v v9, (a0), v8, v9
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i8(
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i16(
<vscale x 2 x i32> *%0,
- <vscale x 2 x i8> %1,
+ <vscale x 2 x i16> %1,
<vscale x 2 x i32> %2,
i64 %3)
ret <vscale x 2 x i32> %a
}
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8(
+declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16(
<vscale x 2 x i32>*,
- <vscale x 2 x i8>,
+ <vscale x 2 x i16>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i64);
-define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i8:
+define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vamomaxuei8.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vamomaxuei16.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8(
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16(
<vscale x 2 x i32> *%0,
- <vscale x 2 x i8> %1,
+ <vscale x 2 x i16> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x i32> %a
}
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i8(
+declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i16(
<vscale x 4 x i32>*,
- <vscale x 4 x i8>,
+ <vscale x 4 x i16>,
<vscale x 4 x i32>,
i64);
-define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i8:
+define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vamomaxuei8.v v10, (a0), v8, v10
+; CHECK-NEXT: vamomaxuei16.v v10, (a0), v8, v10
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i8(
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i16(
<vscale x 4 x i32> *%0,
- <vscale x 4 x i8> %1,
+ <vscale x 4 x i16> %1,
<vscale x 4 x i32> %2,
i64 %3)
ret <vscale x 4 x i32> %a
}
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8(
+declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16(
<vscale x 4 x i32>*,
- <vscale x 4 x i8>,
+ <vscale x 4 x i16>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i64);
-define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i8:
+define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vamomaxuei8.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vamomaxuei16.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8(
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16(
<vscale x 4 x i32> *%0,
- <vscale x 4 x i8> %1,
+ <vscale x 4 x i16> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x i32> %a
}
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i8(
+declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i16(
<vscale x 8 x i32>*,
- <vscale x 8 x i8>,
+ <vscale x 8 x i16>,
<vscale x 8 x i32>,
i64);
-define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i8:
+define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT: vamomaxuei8.v v12, (a0), v8, v12
+; CHECK-NEXT: vamomaxuei16.v v12, (a0), v8, v12
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i8(
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i16(
<vscale x 8 x i32> *%0,
- <vscale x 8 x i8> %1,
+ <vscale x 8 x i16> %1,
<vscale x 8 x i32> %2,
i64 %3)
ret <vscale x 8 x i32> %a
}
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8(
+declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16(
<vscale x 8 x i32>*,
- <vscale x 8 x i8>,
+ <vscale x 8 x i16>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i64);
-define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i8:
+define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT: vamomaxuei8.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vamomaxuei16.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8(
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16(
<vscale x 8 x i32> *%0,
- <vscale x 8 x i8> %1,
+ <vscale x 8 x i16> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x i32> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i8(
+declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i16(
<vscale x 16 x i32>*,
- <vscale x 16 x i8>,
+ <vscale x 16 x i16>,
<vscale x 16 x i32>,
i64);
-define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i8:
+define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
-; CHECK-NEXT: vamomaxuei8.v v16, (a0), v8, v16
+; CHECK-NEXT: vamomaxuei16.v v16, (a0), v8, v16
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i8(
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i16(
<vscale x 16 x i32> *%0,
- <vscale x 16 x i8> %1,
+ <vscale x 16 x i16> %1,
<vscale x 16 x i32> %2,
i64 %3)
ret <vscale x 16 x i32> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8(
+declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16(
<vscale x 16 x i32>*,
- <vscale x 16 x i8>,
+ <vscale x 16 x i16>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
i64);
-define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i8:
+define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
-; CHECK-NEXT: vamomaxuei8.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vamomaxuei16.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8(
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16(
<vscale x 16 x i32> *%0,
- <vscale x 16 x i8> %1,
+ <vscale x 16 x i16> %1,
<vscale x 16 x i32> %2,
<vscale x 16 x i1> %3,
i64 %4)
ret <vscale x 16 x i32> %a
}
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i8(
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i16(
<vscale x 1 x i64>*,
- <vscale x 1 x i8>,
+ <vscale x 1 x i16>,
<vscale x 1 x i64>,
i64);
-define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i8:
+define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vamomaxuei8.v v9, (a0), v8, v9
+; CHECK-NEXT: vamomaxuei16.v v9, (a0), v8, v9
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i8(
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i16(
<vscale x 1 x i64> *%0,
- <vscale x 1 x i8> %1,
+ <vscale x 1 x i16> %1,
<vscale x 1 x i64> %2,
i64 %3)
ret <vscale x 1 x i64> %a
}
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8(
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16(
<vscale x 1 x i64>*,
- <vscale x 1 x i8>,
+ <vscale x 1 x i16>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i64);
-define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i8:
+define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vamomaxuei8.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vamomaxuei16.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8(
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16(
<vscale x 1 x i64> *%0,
- <vscale x 1 x i8> %1,
+ <vscale x 1 x i16> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x i64> %a
}
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i8(
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i16(
<vscale x 2 x i64>*,
- <vscale x 2 x i8>,
+ <vscale x 2 x i16>,
<vscale x 2 x i64>,
i64);
-define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i8:
+define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT: vamomaxuei8.v v10, (a0), v8, v10
+; CHECK-NEXT: vamomaxuei16.v v10, (a0), v8, v10
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i8(
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i16(
<vscale x 2 x i64> *%0,
- <vscale x 2 x i8> %1,
+ <vscale x 2 x i16> %1,
<vscale x 2 x i64> %2,
i64 %3)
ret <vscale x 2 x i64> %a
}
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8(
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16(
<vscale x 2 x i64>*,
- <vscale x 2 x i8>,
+ <vscale x 2 x i16>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i64);
-define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i8:
+define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT: vamomaxuei8.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vamomaxuei16.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8(
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16(
<vscale x 2 x i64> *%0,
- <vscale x 2 x i8> %1,
+ <vscale x 2 x i16> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x i64> %a
}
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i8(
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i16(
<vscale x 4 x i64>*,
- <vscale x 4 x i8>,
+ <vscale x 4 x i16>,
<vscale x 4 x i64>,
i64);
-define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i8:
+define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT: vamomaxuei8.v v12, (a0), v8, v12
+; CHECK-NEXT: vamomaxuei16.v v12, (a0), v8, v12
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i8(
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i16(
<vscale x 4 x i64> *%0,
- <vscale x 4 x i8> %1,
+ <vscale x 4 x i16> %1,
<vscale x 4 x i64> %2,
i64 %3)
ret <vscale x 4 x i64> %a
}
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8(
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16(
<vscale x 4 x i64>*,
- <vscale x 4 x i8>,
+ <vscale x 4 x i16>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i64);
-define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i8:
+define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT: vamomaxuei8.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vamomaxuei16.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8(
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16(
<vscale x 4 x i64> *%0,
- <vscale x 4 x i8> %1,
+ <vscale x 4 x i16> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x i64> %a
}
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i8(
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i16(
<vscale x 8 x i64>*,
- <vscale x 8 x i8>,
+ <vscale x 8 x i16>,
<vscale x 8 x i64>,
i64);
-define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i8:
+define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
-; CHECK-NEXT: vamomaxuei8.v v16, (a0), v8, v16
+; CHECK-NEXT: vamomaxuei16.v v16, (a0), v8, v16
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i8(
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i16(
<vscale x 8 x i64> *%0,
- <vscale x 8 x i8> %1,
+ <vscale x 8 x i16> %1,
<vscale x 8 x i64> %2,
i64 %3)
ret <vscale x 8 x i64> %a
}
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8(
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16(
<vscale x 8 x i64>*,
- <vscale x 8 x i8>,
+ <vscale x 8 x i16>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i64);
-define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i8:
+define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
-; CHECK-NEXT: vamomaxuei8.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vamomaxuei16.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8(
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16(
<vscale x 8 x i64> *%0,
- <vscale x 8 x i8> %1,
+ <vscale x 8 x i16> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x i64> %a
}
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64(
+declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i8(
<vscale x 1 x i32>*,
- <vscale x 1 x i64>,
+ <vscale x 1 x i8>,
<vscale x 1 x i32>,
i64);
-define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i64:
+define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vamomaxuei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vamomaxuei8.v v9, (a0), v8, v9
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64(
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i8(
<vscale x 1 x i32> *%0,
- <vscale x 1 x i64> %1,
+ <vscale x 1 x i8> %1,
<vscale x 1 x i32> %2,
i64 %3)
ret <vscale x 1 x i32> %a
}
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64(
+declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8(
<vscale x 1 x i32>*,
- <vscale x 1 x i64>,
+ <vscale x 1 x i8>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i64);
-define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i64:
+define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vamomaxuei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vamomaxuei8.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64(
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8(
<vscale x 1 x i32> *%0,
- <vscale x 1 x i64> %1,
+ <vscale x 1 x i8> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x i32> %a
}
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i64(
+declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i8(
<vscale x 2 x i32>*,
- <vscale x 2 x i64>,
+ <vscale x 2 x i8>,
<vscale x 2 x i32>,
i64);
-define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i64:
+define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vamomaxuei64.v v10, (a0), v8, v10
-; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: vamomaxuei8.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i64(
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i8(
<vscale x 2 x i32> *%0,
- <vscale x 2 x i64> %1,
+ <vscale x 2 x i8> %1,
<vscale x 2 x i32> %2,
i64 %3)
ret <vscale x 2 x i32> %a
}
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64(
+declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8(
<vscale x 2 x i32>*,
- <vscale x 2 x i64>,
+ <vscale x 2 x i8>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i64);
-define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i64:
+define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vamomaxuei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: vamomaxuei8.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64(
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8(
<vscale x 2 x i32> *%0,
- <vscale x 2 x i64> %1,
+ <vscale x 2 x i8> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x i32> %a
}
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i64(
+declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i8(
<vscale x 4 x i32>*,
- <vscale x 4 x i64>,
+ <vscale x 4 x i8>,
<vscale x 4 x i32>,
i64);
-define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i64:
+define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vamomaxuei64.v v12, (a0), v8, v12
-; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: vamomaxuei8.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i64(
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i8(
<vscale x 4 x i32> *%0,
- <vscale x 4 x i64> %1,
+ <vscale x 4 x i8> %1,
<vscale x 4 x i32> %2,
i64 %3)
ret <vscale x 4 x i32> %a
}
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64(
+declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8(
<vscale x 4 x i32>*,
- <vscale x 4 x i64>,
+ <vscale x 4 x i8>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i64);
-define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i64:
+define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vamomaxuei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: vamomaxuei8.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64(
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8(
<vscale x 4 x i32> *%0,
- <vscale x 4 x i64> %1,
+ <vscale x 4 x i8> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x i32> %a
}
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i64(
+declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i8(
<vscale x 8 x i32>*,
- <vscale x 8 x i64>,
+ <vscale x 8 x i8>,
<vscale x 8 x i32>,
i64);
-define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i64:
+define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT: vamomaxuei64.v v16, (a0), v8, v16
-; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: vamomaxuei8.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i64(
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i8(
<vscale x 8 x i32> *%0,
- <vscale x 8 x i64> %1,
+ <vscale x 8 x i8> %1,
<vscale x 8 x i32> %2,
i64 %3)
ret <vscale x 8 x i32> %a
}
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64(
+declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8(
<vscale x 8 x i32>*,
- <vscale x 8 x i64>,
+ <vscale x 8 x i8>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i64);
-define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i64:
+define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT: vamomaxuei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: vamomaxuei8.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64(
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8(
<vscale x 8 x i32> *%0,
- <vscale x 8 x i64> %1,
+ <vscale x 8 x i8> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x i32> %a
}
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i64(
+declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i32>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vamomaxuei8.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> *%0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vamomaxuei8.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> *%0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i8(
<vscale x 1 x i64>*,
- <vscale x 1 x i64>,
+ <vscale x 1 x i8>,
<vscale x 1 x i64>,
i64);
-define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i64:
+define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vamomaxuei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vamomaxuei8.v v9, (a0), v8, v9
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i64(
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i8(
<vscale x 1 x i64> *%0,
- <vscale x 1 x i64> %1,
+ <vscale x 1 x i8> %1,
<vscale x 1 x i64> %2,
i64 %3)
ret <vscale x 1 x i64> %a
}
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64(
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8(
<vscale x 1 x i64>*,
- <vscale x 1 x i64>,
+ <vscale x 1 x i8>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i64);
-define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i64:
+define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vamomaxuei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vamomaxuei8.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64(
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8(
<vscale x 1 x i64> *%0,
- <vscale x 1 x i64> %1,
+ <vscale x 1 x i8> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x i64> %a
}
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i64(
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i8(
<vscale x 2 x i64>*,
- <vscale x 2 x i64>,
+ <vscale x 2 x i8>,
<vscale x 2 x i64>,
i64);
-define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i64:
+define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT: vamomaxuei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vamomaxuei8.v v10, (a0), v8, v10
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i64(
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i8(
<vscale x 2 x i64> *%0,
- <vscale x 2 x i64> %1,
+ <vscale x 2 x i8> %1,
<vscale x 2 x i64> %2,
i64 %3)
ret <vscale x 2 x i64> %a
}
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64(
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8(
<vscale x 2 x i64>*,
- <vscale x 2 x i64>,
+ <vscale x 2 x i8>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i64);
-define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i64:
+define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT: vamomaxuei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vamomaxuei8.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64(
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8(
<vscale x 2 x i64> *%0,
- <vscale x 2 x i64> %1,
+ <vscale x 2 x i8> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x i64> %a
}
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i64(
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i8(
<vscale x 4 x i64>*,
- <vscale x 4 x i64>,
+ <vscale x 4 x i8>,
<vscale x 4 x i64>,
i64);
-define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i64:
+define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT: vamomaxuei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vamomaxuei8.v v12, (a0), v8, v12
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i64(
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i8(
<vscale x 4 x i64> *%0,
- <vscale x 4 x i64> %1,
+ <vscale x 4 x i8> %1,
<vscale x 4 x i64> %2,
i64 %3)
ret <vscale x 4 x i64> %a
}
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64(
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8(
<vscale x 4 x i64>*,
- <vscale x 4 x i64>,
+ <vscale x 4 x i8>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i64);
-define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i64:
+define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT: vamomaxuei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vamomaxuei8.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64(
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8(
<vscale x 4 x i64> *%0,
- <vscale x 4 x i64> %1,
+ <vscale x 4 x i8> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x i64> %a
}
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i64(
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i8(
<vscale x 8 x i64>*,
- <vscale x 8 x i64>,
+ <vscale x 8 x i8>,
<vscale x 8 x i64>,
i64);
-define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i64:
+define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
-; CHECK-NEXT: vamomaxuei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vamomaxuei8.v v16, (a0), v8, v16
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i64(
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i8(
<vscale x 8 x i64> *%0,
- <vscale x 8 x i64> %1,
+ <vscale x 8 x i8> %1,
<vscale x 8 x i64> %2,
i64 %3)
ret <vscale x 8 x i64> %a
}
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64(
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8(
<vscale x 8 x i64>*,
- <vscale x 8 x i64>,
+ <vscale x 8 x i8>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i64);
-define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i64:
+define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
-; CHECK-NEXT: vamomaxuei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vamomaxuei8.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64(
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8(
<vscale x 8 x i64> *%0,
- <vscale x 8 x i64> %1,
+ <vscale x 8 x i8> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
 i64 %4)
 ret <vscale x 8 x i64> %a
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamomin_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_v_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamominei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamomin_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamominei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamomin_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_v_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamominei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamomin_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamominei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamomin_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_v_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamominei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamomin_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamominei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamomin_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_v_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamominei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamomin_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamominei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomin_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamominei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomin_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamominei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomin_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamominei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomin_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamominei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomin_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamominei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomin_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamominei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamomin_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamominei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamomin_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamominei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
declare <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i32(
<vscale x 1 x i32>*,
<vscale x 1 x i32>,
ret <vscale x 8 x i32> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i32(
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
+declare <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vamomin_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vamominei32.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> *%0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vamomin_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vamominei32.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> *%0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomin_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_v_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamominei32.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomin_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamominei32.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomin_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_v_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamominei32.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomin_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamominei32.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomin_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_v_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamominei32.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomin_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamominei32.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
i32);
-define <vscale x 16 x i32> @intrinsic_vamomin_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv16i32_nxv16i32:
+define <vscale x 8 x i64> @intrinsic_vamomin_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_v_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
; CHECK-NEXT: vamominei32.v v16, (a0), v8, v16
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i32(
- <vscale x 16 x i32> *%0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %2,
i32 %3)
- ret <vscale x 16 x i32> %a
+ ret <vscale x 8 x i64> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
+declare <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
i32);
-define <vscale x 16 x i32> @intrinsic_vamomin_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv16i32_nxv16i32:
+define <vscale x 8 x i64> @intrinsic_vamomin_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
; CHECK-NEXT: vamominei32.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> *%0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
i32 %4)
- ret <vscale x 16 x i32> %a
+ ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i16(
ret <vscale x 16 x i32> %a
}
+declare <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomin_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_v_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamominei16.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomin_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamominei16.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomin_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_v_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamominei16.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomin_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamominei16.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomin_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_v_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamominei16.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomin_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamominei16.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamomin_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_v_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamominei16.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamomin_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamominei16.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
declare <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i8(
<vscale x 1 x i32>*,
<vscale x 1 x i8>,
ret <vscale x 16 x i32> %a
}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomin_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_v_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamominei8.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomin_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamominei8.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomin_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_v_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamominei8.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomin_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamominei8.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomin_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_v_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamominei8.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomin_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamominei8.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamomin_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_v_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamominei8.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamomin_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamominei8.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i64(
<vscale x 1 x i32>*,
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamominu_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_v_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamominuei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamominu_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamominuei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamominu_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_v_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamominuei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamominu_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamominuei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamominu_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_v_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamominuei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamominu_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamominuei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamominu_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_v_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamominuei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamominu_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamominuei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamominu_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamominuei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamominu_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamominuei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamominu_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamominuei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamominu_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamominuei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamominu_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamominuei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamominu_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamominuei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamominu_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamominuei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamominu_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamominuei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
declare <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i32(
<vscale x 1 x i32>*,
<vscale x 1 x i32>,
ret <vscale x 8 x i32> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i32(
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
+declare <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vamominu_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vamominuei32.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> *%0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vamominu_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vamominuei32.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> *%0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamominu_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_v_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamominuei32.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamominu_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamominuei32.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamominu_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_v_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamominuei32.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamominu_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamominuei32.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamominu_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_v_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamominuei32.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamominu_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamominuei32.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
i32);
-define <vscale x 16 x i32> @intrinsic_vamominu_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv16i32_nxv16i32:
+define <vscale x 8 x i64> @intrinsic_vamominu_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_v_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
; CHECK-NEXT: vamominuei32.v v16, (a0), v8, v16
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i32(
- <vscale x 16 x i32> *%0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %2,
i32 %3)
- ret <vscale x 16 x i32> %a
+ ret <vscale x 8 x i64> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
+declare <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
i32);
-define <vscale x 16 x i32> @intrinsic_vamominu_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv16i32_nxv16i32:
+define <vscale x 8 x i64> @intrinsic_vamominu_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
; CHECK-NEXT: vamominuei32.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> *%0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
i32 %4)
- ret <vscale x 16 x i32> %a
+ ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i16(
ret <vscale x 16 x i32> %a
}
+declare <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamominu_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_v_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamominuei16.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamominu_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamominuei16.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamominu_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_v_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamominuei16.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamominu_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamominuei16.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamominu_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_v_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamominuei16.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamominu_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamominuei16.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamominu_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_v_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamominuei16.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamominu_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamominuei16.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
declare <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i8(
<vscale x 1 x i32>*,
<vscale x 1 x i8>,
ret <vscale x 16 x i32> %a
}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamominu_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_v_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamominuei8.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamominu_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamominuei8.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamominu_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_v_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamominuei8.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamominu_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamominuei8.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamominu_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_v_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamominuei8.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamominu_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamominuei8.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamominu_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_v_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamominuei8.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamominu_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamominuei8.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i64(
<vscale x 1 x i32>*,
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamoor_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_v_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamoorei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamoor_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamoorei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamoor_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_v_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamoorei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamoor_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamoorei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamoor_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_v_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamoorei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamoor_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamoorei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamoor_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_v_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamoorei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamoor_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamoorei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoor_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoorei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoor_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoorei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoor_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoorei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoor_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoorei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoor_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoorei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoor_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoorei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoor_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoorei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoor_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoorei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
declare <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i32(
<vscale x 1 x i32>*,
<vscale x 1 x i32>,
ret <vscale x 8 x i32> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32(
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
+declare <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vamoor_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vamoorei32.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> *%0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vamoor_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vamoorei32.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> *%0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoor_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_v_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoorei32.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoor_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoorei32.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoor_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_v_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoorei32.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoor_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoorei32.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoor_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_v_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoorei32.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoor_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoorei32.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
i32);
-define <vscale x 16 x i32> @intrinsic_vamoor_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv16i32_nxv16i32:
+define <vscale x 8 x i64> @intrinsic_vamoor_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_v_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
; CHECK-NEXT: vamoorei32.v v16, (a0), v8, v16
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32(
- <vscale x 16 x i32> *%0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %2,
i32 %3)
- ret <vscale x 16 x i32> %a
+ ret <vscale x 8 x i64> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
+declare <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
i32);
-define <vscale x 16 x i32> @intrinsic_vamoor_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv16i32_nxv16i32:
+define <vscale x 8 x i64> @intrinsic_vamoor_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
; CHECK-NEXT: vamoorei32.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> *%0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
i32 %4)
- ret <vscale x 16 x i32> %a
+ ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i16(
ret <vscale x 16 x i32> %a
}
+declare <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoor_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_v_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoorei16.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoor_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoorei16.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoor_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_v_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoorei16.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoor_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoorei16.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoor_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_v_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoorei16.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoor_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoorei16.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoor_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_v_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoorei16.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoor_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoorei16.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
declare <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i8(
<vscale x 1 x i32>*,
<vscale x 1 x i8>,
ret <vscale x 16 x i32> %a
}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoor_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_v_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoorei8.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoor_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoorei8.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoor_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_v_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoorei8.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoor_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoorei8.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoor_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_v_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoorei8.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoor_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoorei8.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoor_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_v_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoorei8.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoor_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoorei8.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64(
<vscale x 1 x i32>*,
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamoswap_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamoswapei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamoswapei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamoswap_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamoswapei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamoswapei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamoswap_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamoswapei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamoswapei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamoswap_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamoswapei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamoswapei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoswapei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoswapei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoswapei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoswapei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoswapei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoswapei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoswapei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoswapei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i64(
+ <vscale x 1 x float>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x float>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vamoswap_v_nxv1f32_nxv1i64(<vscale x 1 x float> *%0, <vscale x 1 x i64> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamoswapei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i64(
+ <vscale x 1 x float> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x float> %2,
+ i32 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64(
+ <vscale x 1 x float>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i64(<vscale x 1 x float> *%0, <vscale x 1 x i64> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamoswapei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64(
+ <vscale x 1 x float> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i64(
+ <vscale x 2 x float>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vamoswap_v_nxv2f32_nxv2i64(<vscale x 2 x float> *%0, <vscale x 2 x i64> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamoswapei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i64(
+ <vscale x 2 x float> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64(
+ <vscale x 2 x float>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i64(<vscale x 2 x float> *%0, <vscale x 2 x i64> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamoswapei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64(
+ <vscale x 2 x float> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i64(
+ <vscale x 4 x float>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x float>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vamoswap_v_nxv4f32_nxv4i64(<vscale x 4 x float> *%0, <vscale x 4 x i64> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamoswapei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i64(
+ <vscale x 4 x float> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x float> %2,
+ i32 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64(
+ <vscale x 4 x float>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i64(<vscale x 4 x float> *%0, <vscale x 4 x i64> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamoswapei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64(
+ <vscale x 4 x float> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i64(
+ <vscale x 8 x float>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x float>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vamoswap_v_nxv8f32_nxv8i64(<vscale x 8 x float> *%0, <vscale x 8 x i64> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamoswapei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i64(
+ <vscale x 8 x float> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x float> %2,
+ i32 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64(
+ <vscale x 8 x float>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i64(<vscale x 8 x float> *%0, <vscale x 8 x i64> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamoswapei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64(
+ <vscale x 8 x float> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i64(
+ <vscale x 1 x double>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x double>,
+ i32);
+
+define <vscale x 1 x double> @intrinsic_vamoswap_v_nxv1f64_nxv1i64(<vscale x 1 x double> *%0, <vscale x 1 x i64> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoswapei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i64(
+ <vscale x 1 x double> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x double> %2,
+ i32 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x double> @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i64(<vscale x 1 x double> *%0, <vscale x 1 x i64> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoswapei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i64(
+ <vscale x 2 x double>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x double>,
+ i32);
+
+define <vscale x 2 x double> @intrinsic_vamoswap_v_nxv2f64_nxv2i64(<vscale x 2 x double> *%0, <vscale x 2 x i64> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoswapei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i64(
+ <vscale x 2 x double> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x double> %2,
+ i32 %3)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x double> @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i64(<vscale x 2 x double> *%0, <vscale x 2 x i64> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoswapei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i64(
+ <vscale x 4 x double>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x double>,
+ i32);
+
+define <vscale x 4 x double> @intrinsic_vamoswap_v_nxv4f64_nxv4i64(<vscale x 4 x double> *%0, <vscale x 4 x i64> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoswapei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i64(
+ <vscale x 4 x double> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x double> %2,
+ i32 %3)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x double> @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i64(<vscale x 4 x double> *%0, <vscale x 4 x i64> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoswapei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i64(
+ <vscale x 8 x double>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x double>,
+ i32);
+
+define <vscale x 8 x double> @intrinsic_vamoswap_v_nxv8f64_nxv8i64(<vscale x 8 x double> *%0, <vscale x 8 x i64> %1, <vscale x 8 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoswapei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i64(
+ <vscale x 8 x double> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x double> %2,
+ i32 %3)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x double>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x double> @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i64(<vscale x 8 x double> *%0, <vscale x 8 x i64> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoswapei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x double> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x double> %a
+}
+
declare <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32(
<vscale x 1 x i32>*,
<vscale x 1 x i32>,
<vscale x 16 x i1>,
i32);
-define <vscale x 16 x i32> @intrinsic_vamoswap_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16i32_nxv16i32:
+define <vscale x 16 x i32> @intrinsic_vamoswap_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vamoswapei32.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> *%0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoswapei32.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoswapei32.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoswapei32.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoswapei32.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoswapei32.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoswapei32.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoswapei32.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
; CHECK-NEXT: vamoswapei32.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> *%0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
i32 %4)
- ret <vscale x 16 x i32> %a
+ ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i32(
ret <vscale x 16 x i32> %a
}
+declare <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoswapei16.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoswapei16.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoswapei16.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoswapei16.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoswapei16.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoswapei16.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoswapei16.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoswapei16.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
declare <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i16(
<vscale x 1 x float>*,
<vscale x 1 x i16>,
ret <vscale x 16 x i32> %a
}
+declare <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoswapei8.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoswapei8.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoswapei8.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoswapei8.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoswapei8.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoswapei8.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoswapei8.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoswapei8.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
declare <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i8(
<vscale x 1 x float>*,
<vscale x 1 x i8>,
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamoxor_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamoxorei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamoxorei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamoxor_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamoxorei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamoxorei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamoxor_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamoxorei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamoxorei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamoxor_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamoxorei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamoxorei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoxor_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoxorei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoxorei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoxor_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoxorei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoxorei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoxor_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoxorei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoxorei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoxor_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoxorei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoxorei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
declare <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i32(
<vscale x 1 x i32>*,
<vscale x 1 x i32>,
ret <vscale x 8 x i32> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32(
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
+declare <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vamoxor_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vamoxorei32.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> *%0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vamoxor_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vamoxorei32.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> *%0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoxor_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoxorei32.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoxorei32.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoxor_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoxorei32.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoxorei32.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoxor_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoxorei32.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoxorei32.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
i32);
-define <vscale x 16 x i32> @intrinsic_vamoxor_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv16i32_nxv16i32:
+define <vscale x 8 x i64> @intrinsic_vamoxor_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
; CHECK-NEXT: vamoxorei32.v v16, (a0), v8, v16
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32(
- <vscale x 16 x i32> *%0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %2,
i32 %3)
- ret <vscale x 16 x i32> %a
+ ret <vscale x 8 x i64> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
+declare <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
i32);
-define <vscale x 16 x i32> @intrinsic_vamoxor_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv16i32_nxv16i32:
+define <vscale x 8 x i64> @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
; CHECK-NEXT: vamoxorei32.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> *%0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
i32 %4)
- ret <vscale x 16 x i32> %a
+ ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i16(
ret <vscale x 16 x i32> %a
}
+declare <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoxor_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoxorei16.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoxorei16.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoxor_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoxorei16.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoxorei16.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoxor_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoxorei16.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoxorei16.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoxor_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoxorei16.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoxorei16.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
declare <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i8(
<vscale x 1 x i32>*,
<vscale x 1 x i8>,
ret <vscale x 16 x i32> %a
}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoxor_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoxorei8.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamoxorei8.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoxor_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoxorei8.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamoxorei8.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoxor_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoxorei8.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamoxorei8.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoxor_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoxorei8.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamoxorei8.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64(
<vscale x 1 x i32>*,